diff --git a/.gitignore b/.gitignore index 13e874780..6b8081823 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,11 @@ _testmain.go .goxc.local.json .idea/ pandora.iml +*.log +*.out +out/ + +vendor/ + +docs/_* +.DS_Store diff --git a/.goxc.json b/.goxc.json index 67b7d63bb..adc9960bb 100644 --- a/.goxc.json +++ b/.goxc.json @@ -5,7 +5,7 @@ ], "Arch": "amd64", "Os": "linux darwin windows", - "PackageVersion": "0.1.2", + "PackageVersion": "0.1.3", "TaskSettings": { "publish-github": { "owner": "yandex", @@ -13,4 +13,4 @@ } }, "ConfigVersion": "0.9" -} \ No newline at end of file +} diff --git a/.travis.yml b/.travis.yml index e886bf480..9679cc1b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,17 +1,17 @@ language: go -go: - - 1.5.2 - - tip -before_install: - - go get github.com/amahi/spdy - - go get golang.org/x/net/context - - go get github.com/amahi/spdy - - go get github.com/stretchr/testify - - go get github.com/SlyMarbo/spdy - - go get github.com/davecgh/go-spew/spew - - go get github.com/pquerna/ffjson +go: + - 1.11.x + +env: + - GO111MODULE=on -install: make tools +go_import_path: github.com/yandex/pandora + +before_install: + - mkdir $HOME/tools + - GOPATH=$HOME/tools make tools + - export PATH=$HOME/tools/bin:$PATH + - make deps script: make travis diff --git a/AUTHORS b/AUTHORS index e4596dd87..ea2c27281 100644 --- a/AUTHORS +++ b/AUTHORS @@ -2,6 +2,7 @@ The following authors have created the source code of "Pandora" published and distributed by YANDEX LLC as the owner: Alexey Lavrenuke +Vladimir Skipor The following authors have licensed their contributions to YANDEX LLC and everyone who uses "Pandora" under the licensing terms detailed in LICENSE available at https://www.mozilla.org/en-US/MPL/2.0/: diff --git a/Makefile b/Makefile index 6184e87d3..20726e4b8 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,16 @@ -.PHONY: all test lint vet fmt travis coverage checkfmt prepare updep +.PHONY: all test lint vet fmt travis coverage checkfmt prepare deps NO_COLOR=\033[0m OK_COLOR=\033[32;01m ERROR_COLOR=\033[31;01m WARN_COLOR=\033[33;01m -PKGSDIRS=$(shell find -L . -type f -name "*.go" -not -path "./Godeps/*") + all: test vet checkfmt -travis: test checkfmt vet coverage +travis: test checkfmt coverage -prepare: fmt test vet checkfmt #m updep +prepare: fmt test vet test: @echo "$(OK_COLOR)Test packages$(NO_COLOR)" @@ -21,37 +21,27 @@ coverage: @./script/coverage.sh -goveralls -coverprofile=gover.coverprofile -service=travis-ci -lint: - @echo "$(OK_COLOR)Run lint$(NO_COLOR)" - test -z "$$(golint -min_confidence 0.1 ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" - vet: @echo "$(OK_COLOR)Run vet$(NO_COLOR)" @go vet ./... - checkfmt: @echo "$(OK_COLOR)Check formats$(NO_COLOR)" @./script/checkfmt.sh . fmt: - @echo "$(OK_COLOR)Formatting$(NO_COLOR)" - @echo $(PKGSDIRS) | xargs -I '{p}' -n1 goimports -w {p} + @echo "$(OK_COLOR)Check fmt$(NO_COLOR)" + @echo "FIXME go fmt does not format imports, should be fixed" + @go fmt tools: @echo "$(OK_COLOR)Install tools$(NO_COLOR)" - go get github.com/tools/godep - go get golang.org/x/tools/cmd/goimports - go get github.com/golang/lint/golint - go get github.com/stretchr/testify + go install golang.org/x/tools/cmd/goimports go get golang.org/x/tools/cmd/cover go get github.com/modocache/gover go get github.com/mattn/goveralls -updep: - @echo "$(OK_COLOR)Update dependencies$(NO_COLOR)" - GOOS=linux godep save ./... - GOOS=linux godep update github.com/... - GOOS=linux godep update gopkg.in/... - GOOS=linux godep update golang.org/... 
- +deps: + $(info #Install dependencies...) + go mod tidy + go mod download diff --git a/README.md b/README.md index 28a09bf28..b02541986 100644 --- a/README.md +++ b/README.md @@ -2,23 +2,26 @@ [![Join the chat at https://gitter.im/yandex/pandora](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/yandex/pandora?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://travis-ci.org/yandex/pandora.svg)](https://travis-ci.org/yandex/pandora) -[![Coverage Status](https://coveralls.io/repos/yandex/pandora/badge.svg?branch=master&service=github)](https://coveralls.io/github/yandex/pandora?branch=master) +[![Coverage Status](https://coveralls.io/repos/yandex/pandora/badge.svg?branch=develop&service=github)](https://coveralls.io/github/yandex/pandora?branch=develop) +[![Read the Docs](https://readthedocs.org/projects/yandexpandora/badge/)](https://readthedocs.org/projects/yandexpandora/) -A load generator in Go language. +Pandora is a high-performance load generator written in Go. It has built-in HTTP(S) and HTTP/2 support and you can write your own load scenarios in Go, compiling them just before your test. -## Install -Compile a binary with go tool (use go >= 1.5.2): -``` -go get github.com/yandex/pandora -go build github.com/yandex/pandora -``` +## How to start -There are also [binary releases](https://github.com/yandex/pandora/releases) available. +### Binary releases +[Downloads](https://github.com/yandex/pandora/releases) are available. -Run this binary with your .json config (see [examples](https://github.com/yandex/pandora/tree/master/example/config)): -``` -./pandora myconfig.json +### Building from sources +We use Go 1.11 modules. +If you build pandora inside $GOPATH, please make sure you have the env variable `GO111MODULE` set to `on`. +```bash +git clone https://github.com/yandex/pandora.git +cd pandora +make deps +go install ``` + Or let [Yandex.Tank](https://yandextank.readthedocs.io/en/latest/core_and_modules.html#pandora) make it easy for you. @@ -26,57 +29,18 @@ Or let [Yandex.Tank](https://yandextank.readthedocs.io/en/latest/core_and_module You can write plugins with the next [extension points](https://github.com/progrium/go-extpoints): +You can also cross-compile for other arch/os: ``` -ammo.Provider -aggregate.ResultListener -limiter.Limiter -gun.Gun +GOOS=linux GOARCH=amd64 go build ``` -## Build tags +### Running your tests +Run the binary with your config (see config examples at [examples](https://github.com/yandex/pandora/tree/develop/examples)): -If you don want to build pandora without http gun: -``` -go build -tags 'noHttpGun' github.com/yandex/pandora +```bash +# $GOBIN should be added to $PATH +pandora myconfig.yaml ``` -If you don want to build pandora without spdy gun: -``` -go build -tags 'noSpdyGun' github.com/yandex/pandora -``` - -## Basic concepts - -### Architectural scheme - -See architectural scheme source in ```docs/architecture.graphml```. It was created with -[YeD](https://www.yworks.com/en/products/yfiles/yed/) editor, so you’ll probably -need it to open the file. - -![Architectural scheme](/docs/architecture.png) - -Pandora is made of components. Components talk to each other over the channels. There are different types of components. - -### Component types - -#### Ammo provider and decoder - -Ammo decoder knows how to make an ammo object from an ammo file or other external resource. Ammo provider uses a decoder -to decode ammo and passes ammo objects to the Users.
- -#### User pool - -User pool controls the creation of Users. All users from one user pool will get ammo from one ammo provider. User creation -schedule is controlled with Startup Limiter. All users from one user pool will also have guns of the same type. - -#### Limiters - -Limiters are objects that will put messages to its underlying channel according to a schedule. User creation, shooting or -other processes thus can be controlled by a Limiter. - -#### Users and Guns -User takes an ammo, waits for a limiter tick and then shoots with a Gun it has. Guns are the tools to send a request to your -service and measure the parameters of the response. - -#### Result Listener -Result listener's task is to collect measured samples and save them somewhere. +Or use Pandora with [Yandex.Tank](http://yandextank.readthedocs.org/en/latest/configuration.html#pandora) and +[Overload](https://overload.yandex.net). diff --git a/acceptance_tests/acceptance_suite_test.go b/acceptance_tests/acceptance_suite_test.go new file mode 100644 index 000000000..d3d22e4fb --- /dev/null +++ b/acceptance_tests/acceptance_suite_test.go @@ -0,0 +1,189 @@ +package acceptance + +import ( + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/ghodss/yaml" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" + "github.com/onsi/gomega/gexec" + "go.uber.org/zap" + + "github.com/yandex/pandora/lib/ginkgoutil" + "github.com/yandex/pandora/lib/tag" +) + +var pandoraBin string + +func TestAcceptanceTests(t *testing.T) { + ginkgoutil.SetupSuite() + var args []string + if tag.Race { + zap.L().Debug("Building with race detector") + args = append(args, "-race") + } + if tag.Debug { + zap.L().Debug("Building with debug tag") + args = append(args, "-tags", "debug") + } + var err error + pandoraBin, err = gexec.Build("github.com/yandex/pandora", args...) + if err != nil { + t.Fatal(err) + } + defer gexec.CleanupBuildArtifacts() + RunSpecs(t, "AcceptanceTests Suite") +} + +type TestConfig struct { + // Default way to pass config to pandora. + PandoraConfig + // RawConfig overrides Pandora. + RawConfig string + ConfigName string // Without extension. "load" by default. + UseJSON bool // Using YAML by default. + CmdArgs []string // Nothing by default. + Files map[string]string // Extra files to put in dir. Ammo, etc. +} + +func NewTestConfig() *TestConfig { + return &TestConfig{ + PandoraConfig: PandoraConfig{ + Pool: []*InstancePoolConfig{NewInstansePoolConfig()}, + }, + Files: map[string]string{}, + } +} + +// PandoraConfig will be encoded to file via github.com/ghodss/yaml that supports json tags. +// Or it can be encoded as JSON too, to test pandora JSON support. 
+type PandoraConfig struct { + Pool []*InstancePoolConfig `json:"pools"` + LogConfig `json:"log,omitempty"` + MonitoringConfig `json:"monitoring,omitempty"` +} + +type LogConfig struct { + Level string `json:"level,omitempty"` + File string `json:"file,omitempty"` +} + +type MonitoringConfig struct { + Expvar *expvarConfig + CPUProfile *cpuprofileConfig + MemProfile *memprofileConfig +} + +type expvarConfig struct { + Enabled bool `json:"enabled"` + Port int `json:"port"` +} + +type cpuprofileConfig struct { + Enabled bool `json:"enabled"` + File string `json:"file"` +} + +type memprofileConfig struct { + Enabled bool `json:"enabled"` + File string `json:"file"` +} + +func (pc *PandoraConfig) Append(ipc *InstancePoolConfig) { + pc.Pool = append(pc.Pool, ipc) +} + +func NewInstansePoolConfig() *InstancePoolConfig { + return &InstancePoolConfig{ + Provider: map[string]interface{}{}, + Aggregator: map[string]interface{}{}, + Gun: map[string]interface{}{}, + RPSSchedule: map[string]interface{}{}, + StartupSchedule: map[string]interface{}{}, + } + +} + +type InstancePoolConfig struct { + Id string + Provider map[string]interface{} `json:"ammo"` + Aggregator map[string]interface{} `json:"result"` + Gun map[string]interface{} `json:"gun"` + RPSPerInstance bool `json:"rps-per-instance"` + RPSSchedule interface{} `json:"rps"` + StartupSchedule interface{} `json:"startup"` +} + +type PandoraTester struct { + *gexec.Session + // TestDir is working dir of launched pandora. + // It contains config and ammo files, and will be removed after test execution. + // All files created during a test should created in this dir. + TestDir string + Config *TestConfig +} + +func NewTester(conf *TestConfig) *PandoraTester { + testDir, err := ioutil.TempDir("", "pandora_acceptance_") + Expect(err).ToNot(HaveOccurred()) + if conf.ConfigName == "" { + conf.ConfigName = "load" + } + extension := "yaml" + if conf.UseJSON { + extension = "json" + } + var confData []byte + + if conf.RawConfig != "" { + confData = []byte(conf.RawConfig) + } else { + if conf.UseJSON { + confData, err = json.Marshal(conf.PandoraConfig) + } else { + confData, err = yaml.Marshal(conf.PandoraConfig) + } + Expect(err).ToNot(HaveOccurred()) + } + confAbsName := filepath.Join(testDir, conf.ConfigName+"."+extension) + err = ioutil.WriteFile(confAbsName, confData, 0644) + Expect(err).ToNot(HaveOccurred()) + + for file, data := range conf.Files { + fileAbsName := filepath.Join(testDir, file) + err = ioutil.WriteFile(fileAbsName, []byte(data), 0644) + Expect(err).ToNot(HaveOccurred()) + } + + command := exec.Command(pandoraBin, conf.CmdArgs...) + command.Dir = testDir + session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) + Expect(err).ToNot(HaveOccurred()) + + tt := &PandoraTester{ + Session: session, + TestDir: testDir, + Config: conf, + } + return tt +} + +func (pt *PandoraTester) ShouldSay(pattern string) { + EventuallyWithOffset(1, pt.Out, 3*time.Second).Should(gbytes.Say(pattern)) +} + +func (pt *PandoraTester) ExitCode() int { + return pt.Session.Wait(5).ExitCode() +} + +func (pt *PandoraTester) Close() { + pt.Terminate() + os.RemoveAll(pt.TestDir) +} diff --git a/acceptance_tests/http_test.go b/acceptance_tests/http_test.go new file mode 100644 index 000000000..cfdccd44c --- /dev/null +++ b/acceptance_tests/http_test.go @@ -0,0 +1,139 @@ +package acceptance + +import ( + "net/http" + + "golang.org/x/net/http2" + + "net/http/httptest" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "go.uber.org/atomic" +) + +var _ = Describe("http", func() { + var ( + server *httptest.Server + conf *TestConfig + tester *PandoraTester + ) + ServerEndpoint := func() string { + Expect(server).NotTo(BeNil()) + return server.Listener.Addr().String() + } + BeforeEach(func() { + conf = NewTestConfig() + tester = nil + }) + JustBeforeEach(func() { + if server != nil { + Expect(server.URL).NotTo(BeEmpty(), "Please start server manually") + } + tester = NewTester(conf) + }) + AfterEach(func() { + tester.Close() + if server != nil { + server.Close() + server = nil + } + }) + + Context("uri ammo", func() { + var ( + requetsCount atomic.Int64 // Request served by test server. + gunConfig map[string]interface{} // gunConfig config section. + ) + const ( + Requests = 4 + Instances = 2 + OutFile = "out.log" + ) + BeforeEach(func() { + requetsCount.Store(0) + server = httptest.NewUnstartedServer(http.HandlerFunc( + func(rw http.ResponseWriter, req *http.Request) { + requetsCount.Inc() + rw.WriteHeader(http.StatusOK) + })) + + conf.Pool[0].Gun = map[string]interface{}{ + // Set type in test. + "target": ServerEndpoint(), + } + const ammoFile = "ammo.uri" + conf.Pool[0].Provider = map[string]interface{}{ + "type": "uri", + "file": ammoFile, + } + conf.Files[ammoFile] = "/" + conf.Pool[0].Aggregator = map[string]interface{}{ + "type": "phout", + "destination": OutFile, + } + conf.Pool[0].RPSSchedule = []map[string]interface{}{ + {"type": "once", "times": Requests / 2}, + {"type": "const", "ops": Requests, "duration": "0.5s"}, + } + conf.Pool[0].StartupSchedule = []map[string]interface{}{ + {"type": "once", "times": Instances}, + } + gunConfig = conf.Pool[0].Gun + conf.Level = "debug" + }) + itOk := func() { + It("ok", func() { + exitCode := tester.ExitCode() + Expect(exitCode).To(BeZero(), "Pandora finish execution with non zero code") + Expect(requetsCount.Load()).To(BeEquivalentTo(Requests)) + // TODO(skipor): parse and check phout output + }) + } + + Context("http", func() { + BeforeEach(func() { + server.Start() + gunConfig["type"] = "http" + }) + itOk() + }) + + Context("https", func() { + BeforeEach(func() { + server.StartTLS() + gunConfig["type"] = "http" + gunConfig["ssl"] = true + }) + itOk() + }) + + Context("http2", func() { + Context("target support HTTP/2", func() { + BeforeEach(func() { + startHTTP2(server) + gunConfig["type"] = "http2" + }) + itOk() + }) + Context("target DOESN'T support HTTP/2", func() { + BeforeEach(func() { + server.StartTLS() + gunConfig["type"] = "http2" + }) + It("ok", func() { + exitCode := tester.ExitCode() + Expect(exitCode).NotTo(BeZero(), "Pandora should fail") + }) + + }) + }) + + }) +}) + +func startHTTP2(server *httptest.Server) { + http2.ConfigureServer(server.Config, nil) + server.TLS = server.Config.TLSConfig + server.StartTLS() +} diff --git a/aggregate/aggregate.go b/aggregate/aggregate.go deleted file mode 100644 index 28dc6e3d4..000000000 --- a/aggregate/aggregate.go +++ /dev/null @@ -1,33 +0,0 @@ -package aggregate - -import "golang.org/x/net/context" - -type ResultListener interface { - Start(context.Context) error - Sink() chan<- *Sample -} - -type resultListener struct { - sink chan<- *Sample -} - -func (rl *resultListener) Sink() chan<- *Sample { - return rl.sink -} - -func Drain(ctx context.Context, results <-chan *Sample) []*Sample { - samples := []*Sample{} -loop: - for { - select { - case a, more := <-results: - if !more { - break loop - } - samples = append(samples, a) - case <-ctx.Done(): - break loop - } - } - 
return samples -} diff --git a/aggregate/logger.go b/aggregate/logger.go deleted file mode 100644 index d9b60cc38..000000000 --- a/aggregate/logger.go +++ /dev/null @@ -1,51 +0,0 @@ -package aggregate - -import ( - "log" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -// Implements ResultListener interface -type LoggingResultListener struct { - resultListener - - source <-chan *Sample -} - -func (rl *LoggingResultListener) handle(s *Sample) { - log.Println(s) - ReleaseSample(s) -} - -func (rl *LoggingResultListener) Start(ctx context.Context) error { -loop: - for { - select { - case r := <-rl.source: - rl.handle(r) - case <-ctx.Done(): - // Context is done, but we should read all data from source - for { - select { - case r := <-rl.source: - rl.handle(r) - default: - break loop - } - } - } - } - return nil -} - -func NewLoggingResultListener(*config.ResultListener) (ResultListener, error) { - ch := make(chan *Sample, 32) - return &LoggingResultListener{ - source: ch, - resultListener: resultListener{ - sink: ch, - }, - }, nil -} diff --git a/aggregate/phout.go b/aggregate/phout.go deleted file mode 100644 index d1c5aaaae..000000000 --- a/aggregate/phout.go +++ /dev/null @@ -1,102 +0,0 @@ -package aggregate - -import ( - "bufio" - "os" - "time" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -type PhoutResultListener struct { - resultListener - - source <-chan *Sample - phout *bufio.Writer - buffer []byte -} - -func (rl *PhoutResultListener) handle(s *Sample) error { - - rl.buffer = s.AppendToPhout(rl.buffer) - _, err := rl.phout.Write(rl.buffer) - rl.buffer = rl.buffer[:0] - ReleaseSample(s) - return err -} - -func (rl *PhoutResultListener) Start(ctx context.Context) error { - defer rl.phout.Flush() - shouldFlush := time.NewTicker(1 * time.Second).C -loop: - for { - select { - case r := <-rl.source: - if err := rl.handle(r); err != nil { - return err - } - select { - case <-shouldFlush: - rl.phout.Flush() - default: - } - case <-time.After(1 * time.Second): - rl.phout.Flush() - case <-ctx.Done(): - // Context is done, but we should read all data from source - for { - select { - case r := <-rl.source: - if err := rl.handle(r); err != nil { - return err - } - default: - break loop - } - } - } - } - return nil -} - -func NewPhoutResultListener(filename string) (rl ResultListener, err error) { - var phoutFile *os.File - if filename == "" { - phoutFile = os.Stdout - } else { - phoutFile, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0666) - } - writer := bufio.NewWriterSize(phoutFile, 1024*512) // 512 KB - ch := make(chan *Sample, 65536) - return &PhoutResultListener{ - source: ch, - resultListener: resultListener{ - sink: ch, - }, - phout: writer, - buffer: make([]byte, 0, 1024), - }, nil -} - -type phoutResultListeners map[string]ResultListener - -func (prls phoutResultListeners) get(c *config.ResultListener) (ResultListener, error) { - rl, ok := prls[c.Destination] - if !ok { - rl, err := NewPhoutResultListener(c.Destination) - if err != nil { - return nil, err - } - prls[c.Destination] = rl - return rl, nil - } - return rl, nil -} - -var defaultPhoutResultListeners = phoutResultListeners{} - -// GetPhoutResultListener is not thread safe. 
-func GetPhoutResultListener(c *config.ResultListener) (ResultListener, error) { - return defaultPhoutResultListeners.get(c) -} diff --git a/aggregate/sample.go b/aggregate/sample.go deleted file mode 100644 index ccca741b9..000000000 --- a/aggregate/sample.go +++ /dev/null @@ -1,97 +0,0 @@ -package aggregate - -import ( - "fmt" - "strconv" - "sync" -) - -const ( - phoutDelimiter = '\t' - phoutNewLine = '\n' -) - -var samplePool = &sync.Pool{New: func() interface{} { return &Sample{} }} - -type Sample struct { - TS float64 - Tag string - RT int - Connect int - Send int - Latency int - Receive int - IntervalEvent int - Egress int - Igress int - NetCode int - ProtoCode int - Err error -} - -func AcquireSample(ts float64, tag string) *Sample { - s := samplePool.Get().(*Sample) - s.TS = ts - s.Tag = tag - s.RT = 0 - s.Connect = 0 - s.Send = 0 - s.Latency = 0 - s.Receive = 0 - s.IntervalEvent = 0 - s.Egress = 0 - s.Igress = 0 - s.NetCode = 0 - s.ProtoCode = 0 - s.Err = nil - return s -} - -func ReleaseSample(s *Sample) { - samplePool.Put(s) -} - -func (ps *Sample) String() string { - return fmt.Sprintf( - "%.3f\t%s\t%d\t"+ - "%d\t%d\t"+ - "%d\t%d\t"+ - "%d\t"+ - "%d\t%d\t"+ - "%d\t%d", - ps.TS, ps.Tag, ps.RT, - ps.Connect, ps.Send, - ps.Latency, ps.Receive, - ps.IntervalEvent, - ps.Egress, ps.Igress, - ps.NetCode, ps.ProtoCode, - ) -} - -func (ps *Sample) AppendToPhout(dst []byte) []byte { - dst = strconv.AppendFloat(dst, ps.TS, 'f', 3, 64) - dst = append(dst, phoutDelimiter) - dst = append(dst, ps.Tag...) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.RT), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Connect), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Send), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Latency), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Receive), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.IntervalEvent), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Egress), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.Igress), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.NetCode), 10) - dst = append(dst, phoutDelimiter) - dst = strconv.AppendInt(dst, int64(ps.ProtoCode), 10) - dst = append(dst, phoutNewLine) - return dst -} diff --git a/ammo/ammo.go b/ammo/ammo.go deleted file mode 100644 index f8905ab2d..000000000 --- a/ammo/ammo.go +++ /dev/null @@ -1,64 +0,0 @@ -package ammo - -import ( - "sync" - - "golang.org/x/net/context" -) - -type Provider interface { - Start(context.Context) error - Source() <-chan Ammo - Release(Ammo) // return unused Ammo object to memory pool -} - -type BaseProvider struct { - decoder Decoder - source <-chan Ammo - pool sync.Pool -} - -type Ammo interface{} - -type Decoder interface { - Decode([]byte, Ammo) (Ammo, error) -} - -func NewBaseProvider(source <-chan Ammo, decoder Decoder, New func() interface{}) *BaseProvider { - return &BaseProvider{ - source: source, - decoder: decoder, - pool: sync.Pool{New: New}, - } -} - -func (ap *BaseProvider) Source() <-chan Ammo { - return ap.source -} - -func (ap *BaseProvider) Release(a Ammo) { - ap.pool.Put(a) -} - -func (ap *BaseProvider) decode(src []byte) (Ammo, error) { - a := ap.pool.Get() - return ap.decoder.Decode(src, a) -} - -// Drain reads all ammos from ammo.Provider. Useful for tests. 
-func Drain(ctx context.Context, p Provider) []Ammo { - ammos := []Ammo{} -loop: - for { - select { - case a, more := <-p.Source(): - if !more { - break loop - } - ammos = append(ammos, a) - case <-ctx.Done(): - break loop - } - } - return ammos -} diff --git a/ammo/dummy.go b/ammo/dummy.go deleted file mode 100644 index e3ba236cd..000000000 --- a/ammo/dummy.go +++ /dev/null @@ -1,65 +0,0 @@ -package ammo - -import ( - "encoding/json" - "fmt" - "log" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -type Log struct { - Message string -} - -// LogJSONDecoder implements ammo.Decoder interface -type LogJSONDecoder struct{} - -func (*LogJSONDecoder) Decode(jsonDoc []byte, a Ammo) (Ammo, error) { - err := json.Unmarshal(jsonDoc, a) - return a, err -} - -func NewLogJSONDecoder() Decoder { - return &LogJSONDecoder{} -} - -type LogProvider struct { - *BaseProvider - - sink chan<- Ammo - size int -} - -func (ap *LogProvider) Start(ctx context.Context) error { - defer close(ap.sink) -loop: - for i := 0; i < ap.size; i++ { - if a, err := ap.decode([]byte(fmt.Sprintf(`{"message": "Job #%d"}`, i))); err == nil { - select { - case ap.sink <- a: - case <-ctx.Done(): - break loop - } - } else { - return fmt.Errorf("Error decoding log ammo: %s", err) - } - } - log.Println("Ran out of ammo") - return nil -} - -func NewLogAmmoProvider(c *config.AmmoProvider) (Provider, error) { - ammoCh := make(chan Ammo, 128) - ap := &LogProvider{ - size: c.AmmoLimit, - sink: ammoCh, - BaseProvider: NewBaseProvider( - ammoCh, - NewLogJSONDecoder(), - func() interface{} { return &Log{} }, - ), - } - return ap, nil -} diff --git a/ammo/expvar.go b/ammo/expvar.go deleted file mode 100644 index 579cf66f6..000000000 --- a/ammo/expvar.go +++ /dev/null @@ -1,55 +0,0 @@ -package ammo - -import ( - "expvar" - "log" - "strconv" - "sync/atomic" - "time" -) - -type Counter struct { - i int64 -} - -func (c *Counter) String() string { - return strconv.FormatInt(atomic.LoadInt64(&c.i), 10) -} - -func (c *Counter) Add(delta int64) { - atomic.AddInt64(&c.i, delta) -} - -func (c *Counter) Set(value int64) { - atomic.StoreInt64(&c.i, value) -} - -func (c *Counter) Get() int64 { - return atomic.LoadInt64(&c.i) -} - -func NewCounter(name string) *Counter { - v := &Counter{} - expvar.Publish(name, v) - return v -} - -var ( - evPassesLeft = NewCounter("ammo_PassesLeft") -) - -func init() { - go func() { - passesLeft := evPassesLeft.Get() - for _ = range time.NewTicker(1 * time.Second).C { - if passesLeft < 0 { - return // infinite number of passes - } - newPassesLeft := evPassesLeft.Get() - if newPassesLeft != passesLeft { - log.Printf("[AMMO] passes left: %d", newPassesLeft) - passesLeft = newPassesLeft - } - } - }() -} diff --git a/ammo/http.go b/ammo/http.go deleted file mode 100644 index 1ff32db4d..000000000 --- a/ammo/http.go +++ /dev/null @@ -1,99 +0,0 @@ -//go:generate ffjson $GOFILE - -package ammo - -import ( - "bufio" - "fmt" - "log" - "os" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -// ffjson: noencoder -type Http struct { - Host string - Method string - Uri string - Headers map[string]string - Tag string -} - -// HttpJSONDecoder implements ammo.Decoder interface -type HttpJSONDecoder struct{} - -func (d *HttpJSONDecoder) Decode(jsonDoc []byte, a Ammo) (Ammo, error) { - err := a.(*Http).UnmarshalJSON(jsonDoc) - return a, err -} - -// ffjson: skip -type HttpProvider struct { - *BaseProvider - - sink chan<- Ammo - ammoFileName string - ammoLimit int - passes int -} - -func (ap 
*HttpProvider) Start(ctx context.Context) error { - defer close(ap.sink) - ammoFile, err := os.Open(ap.ammoFileName) - if err != nil { - return fmt.Errorf("failed to open ammo source: %v", err) - } - defer ammoFile.Close() - ammoNumber := 0 - passNum := 0 -loop: - for { - passNum++ - scanner := bufio.NewScanner(ammoFile) - scanner.Split(bufio.ScanLines) - for scanner.Scan() && (ap.ammoLimit == 0 || ammoNumber < ap.ammoLimit) { - data := scanner.Bytes() - if a, err := ap.decode(data); err != nil { - return fmt.Errorf("failed to decode ammo: %v", err) - } else { - ammoNumber++ - select { - case ap.sink <- a: - case <-ctx.Done(): - break loop - } - } - } - if ap.passes != 0 && passNum >= ap.passes { - break - } - ammoFile.Seek(0, 0) - if ap.passes == 0 { - evPassesLeft.Set(-1) - //log.Printf("Restarted ammo from the beginning. Infinite passes.\n") - } else { - evPassesLeft.Set(int64(ap.passes - passNum)) - //log.Printf("Restarted ammo from the beginning. Passes left: %d\n", ap.passes-passNum) - } - } - log.Println("Ran out of ammo") - return nil -} - -func NewHttpProvider(c *config.AmmoProvider) (Provider, error) { - ammoCh := make(chan Ammo) - ap := &HttpProvider{ - ammoLimit: c.AmmoLimit, - passes: c.Passes, - ammoFileName: c.AmmoSource, - sink: ammoCh, - BaseProvider: NewBaseProvider( - ammoCh, - &HttpJSONDecoder{}, - func() interface{} { return &Http{} }, - ), - } - return ap, nil -} diff --git a/ammo/http_test.go b/ammo/http_test.go deleted file mode 100644 index 5c2aa6882..000000000 --- a/ammo/http_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package ammo - -import ( - "bufio" - "errors" - "os" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" -) - -const ( - httpTestFilename = "./testdata/ammo.jsonline" -) - -func TestNewHttpProvider(t *testing.T) { - c := &config.AmmoProvider{ - AmmoSource: httpTestFilename, - AmmoLimit: 10, - } - provider, err := NewHttpProvider(c) - require.NoError(t, err) - - httpProvider, casted := provider.(*HttpProvider) - require.True(t, casted, "NewHttpProvider should return *HttpProvider type") - - // look at defaults - assert.Equal(t, 10, httpProvider.ammoLimit) - assert.Equal(t, 0, httpProvider.passes) - assert.NotNil(t, httpProvider.sink) - assert.NotNil(t, httpProvider.BaseProvider.source) - assert.NotNil(t, httpProvider.BaseProvider.decoder) - -} - -func TestHttpProvider(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - providerCtx, _ := context.WithCancel(ctx) - - ammoCh := make(chan Ammo, 128) - provider := &HttpProvider{ - passes: 2, - ammoFileName: httpTestFilename, - sink: ammoCh, - BaseProvider: NewBaseProvider( - ammoCh, - &HttpJSONDecoder{}, - func() interface{} { return &Http{} }, - ), - } - promise := utils.PromiseCtx(providerCtx, provider.Start) - - ammos := Drain(ctx, provider) - require.Len(t, ammos, 25*2) // two passes - - httpAmmo, casted := (ammos[2]).(*Http) - require.True(t, casted, "Ammo should have *Http type") - - assert.Equal(t, "example.org", httpAmmo.Host) - assert.Equal(t, "/02", httpAmmo.Uri) - assert.Equal(t, "hello", httpAmmo.Tag) - assert.Equal(t, "GET", httpAmmo.Method) - assert.Len(t, httpAmmo.Headers, 4) - - // TODO: add test for decoding error - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -var result Ammo - -func BenchmarkJsonDecoder(b 
*testing.B) { - f, err := os.Open(httpTestFilename) - decoder := &HttpJSONDecoder{} - if err != nil { - b.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - jsonDoc, isPrefix, err := r.ReadLine() - if err != nil || isPrefix { - b.Fatal(errors.New("Couldn't properly read ammo sample from data file")) - } - var a Ammo - for n := 0; n < b.N; n++ { - a, _ = decoder.Decode(jsonDoc, &Http{}) - } - _ = a -} - -func BenchmarkJsonDecoderWithPool(b *testing.B) { - f, err := os.Open(httpTestFilename) - decoder := &HttpJSONDecoder{} - if err != nil { - b.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - jsonDoc, isPrefix, err := r.ReadLine() - if err != nil || isPrefix { - b.Fatal(errors.New("Couldn't properly read ammo sample from data file")) - } - var a Ammo - pool := sync.Pool{ - New: func() interface{} { return &Http{} }, - } - for n := 0; n < b.N; n++ { - h := pool.Get().(*Http) - a, _ = decoder.Decode(jsonDoc, h) - pool.Put(h) - } - _ = a -} diff --git a/ammo/testdata/ammo.jsonline b/ammo/testdata/ammo.jsonline deleted file mode 100644 index 47c3d85af..000000000 --- a/ammo/testdata/ammo.jsonline +++ /dev/null @@ -1,25 +0,0 @@ -{"uri": "/00", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/01", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/02", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/03", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/04", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/05", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/06", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/07", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/08", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/09", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/10", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/11", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/12", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/13", "method": "GET", "headers": {"Accept": "*/*", 
"Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/14", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/15", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/16", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/17", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/18", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/19", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/20", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/21", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/22", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/23", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/24", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} diff --git a/cli/cli.go b/cli/cli.go new file mode 100644 index 000000000..cfc0317ff --- /dev/null +++ b/cli/cli.go @@ -0,0 +1,289 @@ +package cli + +import ( + "bufio" + "context" + "flag" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/signal" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "syscall" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/engine" + "github.com/yandex/pandora/lib/zaputil" +) + +const Version = "0.3.0" +const defaultConfigFile = "load" +const stdinConfigSelector = "-" + +var useStdinConfig = false +var configSearchDirs = []string{"./", "./config", "/etc/pandora"} + +type cliConfig struct { + Engine engine.Config `config:",squash"` + Log logConfig `config:"log"` + Monitoring monitoringConfig `config:"monitoring"` +} + +type logConfig struct { + Level zapcore.Level `config:"level"` + File string `config:"file"` +} + +// TODO(skipor): log sampling with WARN when first message is dropped, and WARN at finish with all +// filtered out entries num. Message is filtered out when zapcore.CoreEnable returns true but +// zapcore.Core.Check return nil. 
+func newLogger(conf logConfig) *zap.Logger { + zapConf := zap.NewDevelopmentConfig() + zapConf.OutputPaths = []string{conf.File} + zapConf.Level.SetLevel(conf.Level) + log, err := zapConf.Build(zap.AddCaller()) + if err != nil { + zap.L().Fatal("Logger build failed", zap.Error(err)) + } + return log +} + +func defaultConfig() *cliConfig { + return &cliConfig{ + Log: logConfig{ + Level: zap.InfoLevel, + File: "stdout", + }, + Monitoring: monitoringConfig{ + Expvar: &expvarConfig{ + Enabled: false, + Port: 1234, + }, + CPUProfile: &cpuprofileConfig{ + Enabled: false, + File: "cpuprofile.log", + }, + MemProfile: &memprofileConfig{ + Enabled: false, + File: "memprofile.log", + }, + }, + } +} + +// TODO(skipor): make nice spf13/cobra CLI and integrate it with viper +// TODO(skipor): on special command (help or smth else) print list of available plugins + +func Run() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of Pandora: pandora [<config_filename>]\n"+"<config_filename> is './%s.(yaml|json|...)' by default\n", defaultConfigFile) + flag.PrintDefaults() + } + var ( + example bool + ) + flag.BoolVar(&example, "example", false, "print example config to STDOUT and exit") + flag.Parse() + + if example { + panic("Not implemented yet") + // TODO: print example config file content + } + + conf := readConfig() + log := newLogger(conf.Log) + zap.ReplaceGlobals(log) + zap.RedirectStdLog(log) + + closeMonitoring := startMonitoring(conf.Monitoring) + defer closeMonitoring() + m := newEngineMetrics() + startReport(m) + + pandora := engine.New(log, m, conf.Engine) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + errs := make(chan error) + go runEngine(ctx, pandora, errs) + + sigs := make(chan os.Signal, 2) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // waiting for signal or error message from engine + select { + case sig := <-sigs: + switch sig { + case syscall.SIGINT: + const interruptTimeout = 5 * time.Second + log.Info("SIGINT received. Trying to stop gracefully.", zap.Duration("timeout", interruptTimeout)) + cancel() + select { + case <-time.After(interruptTimeout): + log.Fatal("Interrupt timeout exceeded") + case sig := <-sigs: + log.Fatal("Another signal received. Quitting.", zap.Stringer("signal", sig)) + case err := <-errs: + log.Fatal("Engine interrupted", zap.Error(err)) + } + case syscall.SIGTERM: + log.Fatal("SIGTERM received. Quitting.") + default: + log.Fatal("Unexpected signal received. Quitting.", zap.Stringer("signal", sig)) + } + case err := <-errs: + switch err { + case nil: + log.Info("Pandora engine successfully finished its work") + case err: + const awaitTimeout = 3 * time.Second + log.Error("Engine run failed. Awaiting started tasks.", zap.Error(err), zap.Duration("timeout", awaitTimeout)) + cancel() + time.AfterFunc(awaitTimeout, func() { + log.Fatal("Engine tasks timeout exceeded.") + }) + pandora.Wait() + log.Fatal("Engine run failed.
Pandora graceful shutdown successfully finished") + } + } + log.Info("Engine run successfully finished") +} + +func runEngine(ctx context.Context, engine *engine.Engine, errs chan error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + errs <- engine.Run(ctx) +} + +func readConfig() *cliConfig { + log, err := zap.NewDevelopment(zap.AddCaller()) + if err != nil { + panic(err) + } + log = log.WithOptions(zap.WrapCore(zaputil.NewStackExtractCore)) + zap.ReplaceGlobals(log) + zap.RedirectStdLog(log) + + v := newViper() + + args := flag.Args() + if len(args) > 0 { + switch { + case len(args) > 1: + zap.L().Fatal("Too many command line arguments", zap.Strings("args", flag.Args())) + case args[0] == stdinConfigSelector: + log.Info("Reading config from standard input") + useStdinConfig = true + default: + v.SetConfigFile(args[0]) + } + } + + if useStdinConfig { + v.SetConfigType("yaml") + configBuffer, err := ioutil.ReadAll(bufio.NewReader(os.Stdin)) + if err != nil { + log.Fatal("Cannot read from standard input", zap.Error(err)) + } + err = v.ReadConfig(strings.NewReader(string(configBuffer))) + if err != nil { + log.Fatal("Config parsing failed", zap.Error(err)) + } + } else { + err = v.ReadInConfig() + log.Info("Reading config", zap.String("file", v.ConfigFileUsed())) + if err != nil { + log.Fatal("Config read failed", zap.Error(err)) + } + } + + conf := defaultConfig() + err = config.DecodeAndValidate(v.AllSettings(), conf) + if err != nil { + log.Fatal("Config decode failed", zap.Error(err)) + } + return conf +} + +func newViper() *viper.Viper { + v := viper.New() + v.SetConfigName(defaultConfigFile) + for _, dir := range configSearchDirs { + v.AddConfigPath(dir) + } + return v +} + +type monitoringConfig struct { + Expvar *expvarConfig + CPUProfile *cpuprofileConfig + MemProfile *memprofileConfig +} + +type expvarConfig struct { + Enabled bool `config:"enabled"` + Port int `config:"port" validate:"required"` +} + +type cpuprofileConfig struct { + Enabled bool `config:"enabled"` + File string `config:"file"` +} + +type memprofileConfig struct { + Enabled bool `config:"enabled"` + File string `config:"file"` +} + +func startMonitoring(conf monitoringConfig) (stop func()) { + zap.L().Debug("Start monitoring", zap.Reflect("conf", conf)) + if conf.Expvar != nil { + if conf.Expvar.Enabled { + go func() { + err := http.ListenAndServe(":"+strconv.Itoa(conf.Expvar.Port), nil) + zap.L().Fatal("Monitoring server failed", zap.Error(err)) + }() + } + } + var stops []func() + if conf.CPUProfile.Enabled { + f, err := os.Create(conf.CPUProfile.File) + if err != nil { + zap.L().Fatal("CPU profile file create fail", zap.Error(err)) + } + zap.L().Info("Starting CPU profiling") + pprof.StartCPUProfile(f) + stops = append(stops, func() { + pprof.StopCPUProfile() + f.Close() + }) + } + if conf.MemProfile.Enabled { + f, err := os.Create(conf.MemProfile.File) + if err != nil { + zap.L().Fatal("Memory profile file create fail", zap.Error(err)) + } + stops = append(stops, func() { + zap.L().Info("Writing memory profile") + runtime.GC() + pprof.WriteHeapProfile(f) + f.Close() + }) + } + stop = func() { + for _, s := range stops { + s() + } + } + return +} diff --git a/cli/expvar.go b/cli/expvar.go new file mode 100644 index 000000000..ed8f436d9 --- /dev/null +++ b/cli/expvar.go @@ -0,0 +1,53 @@ +package cli + +import ( + "time" + + "go.uber.org/zap" + + "github.com/yandex/pandora/core/engine" + "github.com/yandex/pandora/lib/monitoring" +) + +func newEngineMetrics() engine.Metrics { + return 
engine.Metrics{ + Request: monitoring.NewCounter("engine_Requests"), + Response: monitoring.NewCounter("engine_Responses"), + InstanceStart: monitoring.NewCounter("engine_UsersStarted"), + InstanceFinish: monitoring.NewCounter("engine_UsersFinished"), + } +} + +func startReport(m engine.Metrics) { + evReqPS := monitoring.NewCounter("engine_ReqPS") + evResPS := monitoring.NewCounter("engine_ResPS") + evActiveUsers := monitoring.NewCounter("engine_ActiveUsers") + evActiveRequests := monitoring.NewCounter("engine_ActiveRequests") + requests := m.Request.Get() + responses := m.Response.Get() + go func() { + var requestsNew, responsesNew int64 + // TODO(skipor): there is no guarantee, that we will run exactly after 1 second. + // So, when we get 1 sec +-10ms, we getting 990-1010 calculate intervals and +-2% RPS in reports. + // Consider using rcrowley/go-metrics.Meter. + for range time.NewTicker(1 * time.Second).C { + requestsNew = m.Request.Get() + responsesNew = m.Response.Get() + rps := responsesNew - responses + reqps := requestsNew - requests + activeUsers := m.InstanceStart.Get() - m.InstanceFinish.Get() + activeRequests := requestsNew - responsesNew + zap.S().Infof( + "[ENGINE] %d resp/s; %d req/s; %d users; %d active\n", + rps, reqps, activeUsers, activeRequests) + + requests = requestsNew + responses = responsesNew + + evActiveUsers.Set(activeUsers) + evActiveRequests.Set(activeRequests) + evReqPS.Set(reqps) + evResPS.Set(rps) + } + }() +} diff --git a/cmd/example.go b/cmd/example.go deleted file mode 100644 index 1ef9627e6..000000000 --- a/cmd/example.go +++ /dev/null @@ -1,72 +0,0 @@ -package cmd - -const exampleConfig string = ` -{ - "Pools": [ - { - "Name": "Pool#0", - "Gun": { - "GunType": "spdy", - "Parameters": { - "Target": "localhost:3000" - } - }, - "AmmoProvider": { - "AmmoType": "jsonline/spdy", - "AmmoSource": "./example/data/ammo.jsonline" - "Passes": 10, - }, - "ResultListener": { - "ListenerType": "log/simple", - "Destination": "" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 3, - "MaxCount": 9, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 2, - "MaxCount": 5, - "Period": 0.1 - } - } - }, - { - "Name": "Pool#1", - "Gun": { - "GunType": "log" - }, - "AmmoProvider": { - "AmmoType": "dummy/log", - "AmmoSource": "" - }, - "ResultListener": { - "ListenerType": "log/simple", - "Destination": "" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 3, - "MaxCount": 9, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 1, - "MaxCount": 4, - "Period": 0.2 - } - } - } - ] -} -` diff --git a/cmd/http_gun.go b/cmd/http_gun.go deleted file mode 100644 index 13fb00b0c..000000000 --- a/cmd/http_gun.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !noHttpGun - -package cmd - -import ( - "github.com/yandex/pandora/extpoints" - httpGun "github.com/yandex/pandora/gun/http" -) - -func init() { - // inject guns - extpoints.Guns.Register(httpGun.New, "http") -} diff --git a/cmd/pandora.go b/cmd/pandora.go deleted file mode 100644 index 848211463..000000000 --- a/cmd/pandora.go +++ /dev/null @@ -1,118 +0,0 @@ -package cmd - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "runtime/pprof" - "time" - - "golang.org/x/net/context" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/engine" - 
"github.com/yandex/pandora/extpoints" - "github.com/yandex/pandora/limiter" - "github.com/yandex/pandora/utils" -) - -func init() { - // inject ammo providers - extpoints.AmmoProviders.Register(ammo.NewHttpProvider, "jsonline/http") - extpoints.AmmoProviders.Register(ammo.NewHttpProvider, "jsonline/spdy") - extpoints.AmmoProviders.Register(ammo.NewLogAmmoProvider, "dummy/log") - - // inject result listeners - extpoints.ResultListeners.Register(aggregate.NewLoggingResultListener, "log/simple") - extpoints.ResultListeners.Register(aggregate.GetPhoutResultListener, "log/phout") - - // inject limiters - extpoints.Limiters.Register(limiter.NewPeriodicFromConfig, "periodic") - extpoints.Limiters.Register(limiter.NewCompositeFromConfig, "composite") - extpoints.Limiters.Register(limiter.NewUnlimitedFromConfig, "unlimited") - extpoints.Limiters.Register(limiter.NewLinearFromConfig, "linear") -} - -func Run() { - fmt.Printf("Pandora v%s\n", Version) - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of Pandora: pandora []\n"+ - " is './load.json' by default\n") - flag.PrintDefaults() - } - example := flag.Bool("example", false, "print example config to STDOUT and exit") - cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file") - memprofile := flag.String("memprofile", "", "write memory profile to this file") - expvarHttp := flag.Bool("expvar", false, "start HTTP server with monitoring variables") - flag.Parse() - - if *example { - fmt.Printf(exampleConfig) - return - } - - if *expvarHttp { - go http.ListenAndServe(":1234", nil) - } - - configFileName := "./load.json" - if len(flag.Args()) > 0 { - configFileName = flag.Args()[0] - } - log.Printf("Reading config from '%s'...\n", configFileName) - jsonDoc, err := ioutil.ReadFile(configFileName) - if err != nil { - log.Printf("Could not read config from file: %s", err) - return - } - cfg, err := config.NewGlobalFromJSON(jsonDoc) - if err != nil { - log.Printf("Could not unmarshal config from json: %s", err) - return - } - - if *cpuprofile != "" { - f, err := os.Create(*cpuprofile) - if err != nil { - log.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } - if *memprofile != "" { - defer func() { - f, err := os.Create(*memprofile) - if err != nil { - log.Fatal(err) - } - pprof.WriteHeapProfile(f) - f.Close() - }() - } - pandora := engine.New(cfg) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - promise := utils.Promise(func() error { return pandora.Serve(ctx) }) - - select { - case <-utils.NotifyInterrupt(): - log.Print("Interrupting by signal, trying to stop") - cancel() - select { - case err = <-promise: - case <-time.After(time.Second * 5): - err = fmt.Errorf("timeout exceeded") - } - case err = <-promise: - } - if err != nil { - log.Fatal(err) - } -} diff --git a/cmd/spdy_gun.go b/cmd/spdy_gun.go deleted file mode 100644 index dfb9441d1..000000000 --- a/cmd/spdy_gun.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !noSpdyGun - -package cmd - -import ( - "github.com/yandex/pandora/extpoints" - spdyGun "github.com/yandex/pandora/gun/spdy" -) - -func init() { - // inject guns - extpoints.Guns.Register(spdyGun.New, "spdy") -} diff --git a/cmd/version.go b/cmd/version.go deleted file mode 100644 index 1ac189586..000000000 --- a/cmd/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package cmd - -const Version = "0.1.2" diff --git a/components/example/example.go b/components/example/example.go new file mode 100644 index 000000000..f6dab2222 --- /dev/null +++ 
b/components/example/example.go @@ -0,0 +1,98 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package example + +import ( + "context" + "fmt" + "sync" + + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator/netsample" +) + +type Ammo struct { + Message string +} + +func NewGun() *Gun { + return &Gun{} +} + +type Gun struct { + aggregator core.Aggregator + core.GunDeps +} + +var _ core.Gun = &Gun{} + +func (l *Gun) Bind(aggregator core.Aggregator, deps core.GunDeps) error { + l.aggregator = aggregator + l.GunDeps = deps + return nil +} + +func (l *Gun) Shoot(a core.Ammo) { + sample := netsample.Acquire("REQUEST") + // Do work here. + l.Log.Info("Example Gun message", zap.String("message", a.(*Ammo).Message)) + sample.SetProtoCode(200) + l.aggregator.Report(sample) +} + +type ProviderConfig struct { + AmmoLimit int `config:"limit"` +} + +func DefaultProviderConfig() ProviderConfig { + return ProviderConfig{AmmoLimit: 16} +} + +func NewProvider(conf ProviderConfig) *Provider { + return &Provider{ + ProviderConfig: conf, + sink: make(chan *Ammo, 128), + pool: sync.Pool{New: func() interface{} { return &Ammo{} }}, + } +} + +type Provider struct { + ProviderConfig + sink chan *Ammo + pool sync.Pool +} + +var _ core.Provider = &Provider{} + +func (p *Provider) Acquire() (ammo core.Ammo, ok bool) { + ammo, ok = <-p.sink + return +} + +func (p *Provider) Release(ammo core.Ammo) { + p.pool.Put(ammo) +} + +func (p *Provider) Run(ctx context.Context, deps core.ProviderDeps) error { + defer close(p.sink) + for i := 0; i < p.AmmoLimit; i++ { + select { + case p.sink <- p.newAmmo(i): + continue + case <-ctx.Done(): + } + break + } + return nil +} + +func (p *Provider) newAmmo(i int) *Ammo { + ammo := p.pool.Get().(*Ammo) + ammo.Message = fmt.Sprintf(`Job #%d"`, i) + return ammo +} diff --git a/components/example/import/import.go b/components/example/import/import.go new file mode 100644 index 000000000..c5c99c73d --- /dev/null +++ b/components/example/import/import.go @@ -0,0 +1,16 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package example + +import ( + . "github.com/yandex/pandora/components/example" + "github.com/yandex/pandora/core/register" +) + +func Import() { + register.Provider("example", NewProvider, DefaultProviderConfig) + register.Gun("example", NewGun) +} diff --git a/components/example/import/import_suite_test.go b/components/example/import/import_suite_test.go new file mode 100644 index 000000000..e1432a1c3 --- /dev/null +++ b/components/example/import/import_suite_test.go @@ -0,0 +1,19 @@ +package example + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestImport(t *testing.T) { + ginkgoutil.RunSuite(t, "Import Suite") +} + +var _ = Describe("import", func() { + It("not panics", func() { + Expect(Import).NotTo(Panic()) + }) +}) diff --git a/components/phttp/ammo/simple/ammo.go b/components/phttp/ammo/simple/ammo.go new file mode 100644 index 000000000..4c01e8575 --- /dev/null +++ b/components/phttp/ammo/simple/ammo.go @@ -0,0 +1,41 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. 
+// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package simple + +import ( + "net/http" + + "github.com/yandex/pandora/components/phttp" + "github.com/yandex/pandora/core/aggregator/netsample" +) + +type Ammo struct { + // OPTIMIZE(skipor): reuse *http.Request. + // Need to research whether it is possible. http.Transport can hold reference to http.Request. + req *http.Request + tag string + id int +} + +func (a *Ammo) Request() (*http.Request, *netsample.Sample) { + sample := netsample.Acquire(a.tag) + sample.SetId(a.id) + return a.req, sample +} + +func (a *Ammo) Reset(req *http.Request, tag string) { + *a = Ammo{req, tag, -1} +} + +func (a *Ammo) SetId(id int) { + a.id = id +} + +func (a *Ammo) Id() int { + return a.id +} + +var _ phttp.Ammo = (*Ammo)(nil) diff --git a/components/phttp/ammo/simple/jsonline/data.go b/components/phttp/ammo/simple/jsonline/data.go new file mode 100644 index 000000000..c95b0b802 --- /dev/null +++ b/components/phttp/ammo/simple/jsonline/data.go @@ -0,0 +1,18 @@ +//go:generate ffjson $GOFILE + +package jsonline + +// ffjson: noencoder +type data struct { + // Host defines Host header to send. + // Request endpoint is defined by gun config. + Host string `json:"host"` + Method string `json:"method"` + Uri string `json:"uri"` + // Headers defines headers to send. + // NOTE: Host header will be silently ignored. + Headers map[string]string `json:"headers"` + Tag string `json:"tag"` + // Body should be a string; double quotes must be escaped for a JSON body. + Body string `json:"body"` +} diff --git a/ammo/http_ffjson.go b/components/phttp/ammo/simple/jsonline/data_ffjson.go similarity index 64% rename from ammo/http_ffjson.go rename to components/phttp/ammo/simple/jsonline/data_ffjson.go index c7100b804..56f4327f6 100644 --- a/ammo/http_ffjson.go +++ b/components/phttp/ammo/simple/jsonline/data_ffjson.go @@ -1,9 +1,7 @@ -// DO NOT EDIT! -// Code generated by ffjson -// source: http.go -// DO NOT EDIT! +// Code generated by ffjson . DO NOT EDIT.
+// source: data.go -package ammo +package jsonline import ( "bytes" @@ -13,38 +11,44 @@ import ( ) const ( - ffj_t_Httpbase = iota - ffj_t_Httpno_such_key + ffjtdatabase = iota + ffjtdatanosuchkey - ffj_t_Http_Host + ffjtdataHost - ffj_t_Http_Method + ffjtdataMethod - ffj_t_Http_Uri + ffjtdataUri - ffj_t_Http_Headers + ffjtdataHeaders - ffj_t_Http_Tag + ffjtdataTag + + ffjtdataBody ) -var ffj_key_Http_Host = []byte("Host") +var ffjKeydataHost = []byte("host") + +var ffjKeydataMethod = []byte("method") -var ffj_key_Http_Method = []byte("Method") +var ffjKeydataUri = []byte("uri") -var ffj_key_Http_Uri = []byte("Uri") +var ffjKeydataHeaders = []byte("headers") -var ffj_key_Http_Headers = []byte("Headers") +var ffjKeydataTag = []byte("tag") -var ffj_key_Http_Tag = []byte("Tag") +var ffjKeydataBody = []byte("body") -func (uj *Http) UnmarshalJSON(input []byte) error { +// UnmarshalJSON umarshall json - template of ffjson +func (j *data) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) - return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) } -func (uj *Http) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error = nil - currentKey := ffj_t_Httpbase +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *data) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtdatabase _ = currentKey tok := fflib.FFTok_init wantedTok := fflib.FFTok_init @@ -90,82 +94,96 @@ mainparse: kn := fs.Output.Bytes() if len(kn) <= 0 { // "" case. hrm. - currentKey = ffj_t_Httpno_such_key + currentKey = ffjtdatanosuchkey state = fflib.FFParse_want_colon goto mainparse } else { switch kn[0] { - case 'H': + case 'b': - if bytes.Equal(ffj_key_Http_Host, kn) { - currentKey = ffj_t_Http_Host + if bytes.Equal(ffjKeydataBody, kn) { + currentKey = ffjtdataBody state = fflib.FFParse_want_colon goto mainparse + } - } else if bytes.Equal(ffj_key_Http_Headers, kn) { - currentKey = ffj_t_Http_Headers + case 'h': + + if bytes.Equal(ffjKeydataHost, kn) { + currentKey = ffjtdataHost + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeydataHeaders, kn) { + currentKey = ffjtdataHeaders state = fflib.FFParse_want_colon goto mainparse } - case 'M': + case 'm': - if bytes.Equal(ffj_key_Http_Method, kn) { - currentKey = ffj_t_Http_Method + if bytes.Equal(ffjKeydataMethod, kn) { + currentKey = ffjtdataMethod state = fflib.FFParse_want_colon goto mainparse } - case 'T': + case 't': - if bytes.Equal(ffj_key_Http_Tag, kn) { - currentKey = ffj_t_Http_Tag + if bytes.Equal(ffjKeydataTag, kn) { + currentKey = ffjtdataTag state = fflib.FFParse_want_colon goto mainparse } - case 'U': + case 'u': - if bytes.Equal(ffj_key_Http_Uri, kn) { - currentKey = ffj_t_Http_Uri + if bytes.Equal(ffjKeydataUri, kn) { + currentKey = ffjtdataUri state = fflib.FFParse_want_colon goto mainparse } } - if fflib.SimpleLetterEqualFold(ffj_key_Http_Tag, kn) { - currentKey = ffj_t_Http_Tag + if fflib.SimpleLetterEqualFold(ffjKeydataBody, kn) { + currentKey = ffjtdataBody + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeydataTag, kn) { + currentKey = ffjtdataTag state = fflib.FFParse_want_colon goto mainparse } - if fflib.EqualFoldRight(ffj_key_Http_Headers, kn) { - currentKey = ffj_t_Http_Headers + if fflib.EqualFoldRight(ffjKeydataHeaders, kn) { + currentKey = ffjtdataHeaders state = fflib.FFParse_want_colon 
goto mainparse } - if fflib.SimpleLetterEqualFold(ffj_key_Http_Uri, kn) { - currentKey = ffj_t_Http_Uri + if fflib.SimpleLetterEqualFold(ffjKeydataUri, kn) { + currentKey = ffjtdataUri state = fflib.FFParse_want_colon goto mainparse } - if fflib.SimpleLetterEqualFold(ffj_key_Http_Method, kn) { - currentKey = ffj_t_Http_Method + if fflib.SimpleLetterEqualFold(ffjKeydataMethod, kn) { + currentKey = ffjtdataMethod state = fflib.FFParse_want_colon goto mainparse } - if fflib.EqualFoldRight(ffj_key_Http_Host, kn) { - currentKey = ffj_t_Http_Host + if fflib.EqualFoldRight(ffjKeydataHost, kn) { + currentKey = ffjtdataHost state = fflib.FFParse_want_colon goto mainparse } - currentKey = ffj_t_Httpno_such_key + currentKey = ffjtdatanosuchkey state = fflib.FFParse_want_colon goto mainparse } @@ -182,22 +200,25 @@ mainparse: if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { switch currentKey { - case ffj_t_Http_Host: + case ffjtdataHost: goto handle_Host - case ffj_t_Http_Method: + case ffjtdataMethod: goto handle_Method - case ffj_t_Http_Uri: + case ffjtdataUri: goto handle_Uri - case ffj_t_Http_Headers: + case ffjtdataHeaders: goto handle_Headers - case ffj_t_Http_Tag: + case ffjtdataTag: goto handle_Tag - case ffj_t_Httpno_such_key: + case ffjtdataBody: + goto handle_Body + + case ffjtdatanosuchkey: err = fs.SkipField(tok) if err != nil { return fs.WrapErr(err) @@ -213,7 +234,7 @@ mainparse: handle_Host: - /* handler: uj.Host type=string kind=string quoted=false*/ + /* handler: j.Host type=string kind=string quoted=false*/ { @@ -229,7 +250,7 @@ handle_Host: outBuf := fs.Output.Bytes() - uj.Host = string(string(outBuf)) + j.Host = string(string(outBuf)) } } @@ -239,7 +260,7 @@ handle_Host: handle_Method: - /* handler: uj.Method type=string kind=string quoted=false*/ + /* handler: j.Method type=string kind=string quoted=false*/ { @@ -255,7 +276,7 @@ handle_Method: outBuf := fs.Output.Bytes() - uj.Method = string(string(outBuf)) + j.Method = string(string(outBuf)) } } @@ -265,7 +286,7 @@ handle_Method: handle_Uri: - /* handler: uj.Uri type=string kind=string quoted=false*/ + /* handler: j.Uri type=string kind=string quoted=false*/ { @@ -281,7 +302,7 @@ handle_Uri: outBuf := fs.Output.Bytes() - uj.Uri = string(string(outBuf)) + j.Uri = string(string(outBuf)) } } @@ -291,7 +312,7 @@ handle_Uri: handle_Headers: - /* handler: uj.Headers type=map[string]string kind=map quoted=false*/ + /* handler: j.Headers type=map[string]string kind=map quoted=false*/ { @@ -302,10 +323,10 @@ handle_Headers: } if tok == fflib.FFTok_null { - uj.Headers = nil + j.Headers = nil } else { - uj.Headers = make(map[string]string, 0) + j.Headers = make(map[string]string, 0) wantVal := true @@ -313,7 +334,7 @@ handle_Headers: var k string - var v string + var tmpJHeaders string tok = fs.Scan() if tok == fflib.FFTok_error { @@ -362,7 +383,7 @@ handle_Headers: } tok = fs.Scan() - /* handler: v type=string kind=string quoted=false*/ + /* handler: tmpJHeaders type=string kind=string quoted=false*/ { @@ -378,12 +399,12 @@ handle_Headers: outBuf := fs.Output.Bytes() - v = string(string(outBuf)) + tmpJHeaders = string(string(outBuf)) } } - uj.Headers[k] = v + j.Headers[k] = tmpJHeaders wantVal = false } @@ -396,7 +417,33 @@ handle_Headers: handle_Tag: - /* handler: uj.Tag type=string kind=string quoted=false*/ + /* handler: j.Tag type=string kind=string quoted=false*/ + + { + + 
{ + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Tag = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Body: + + /* handler: j.Body type=string kind=string quoted=false*/ { @@ -412,7 +459,7 @@ handle_Tag: outBuf := fs.Output.Bytes() - uj.Tag = string(string(outBuf)) + j.Body = string(string(outBuf)) } } @@ -434,5 +481,6 @@ tokerror: } panic("ffjson-generated: unreachable, please report bug.") done: + return nil } diff --git a/components/phttp/ammo/simple/jsonline/jsonline_suite_test.go b/components/phttp/ammo/simple/jsonline/jsonline_suite_test.go new file mode 100644 index 000000000..3f22584a4 --- /dev/null +++ b/components/phttp/ammo/simple/jsonline/jsonline_suite_test.go @@ -0,0 +1,240 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package jsonline + +import ( + "context" + "encoding/json" + "net/http" + "sync" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + "github.com/spf13/afero" + + "github.com/yandex/pandora/components/phttp/ammo/simple" + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestJsonline(t *testing.T) { + ginkgoutil.RunSuite(t, "Jsonline Suite") +} + +const testFile = "./ammo.jsonline" + +// testData holds jsonline.data that contains in testFile +var testData = []data{ + { + Host: "example.com", + Method: "GET", + Uri: "/00", + Headers: map[string]string{"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, + }, + { + Host: "ya.ru", + Method: "HEAD", + Uri: "/01", + Headers: map[string]string{"Accept": "*/*", "Accept-Encoding": "gzip, brotli", "User-Agent": "YaBro/0.1"}, + Tag: "head", + }, +} + +var testFs = newTestFs() + +func newTestFs() afero.Fs { + fs := afero.NewMemMapFs() + file, err := fs.Create(testFile) + if err != nil { + panic(err) + } + encoder := json.NewEncoder(file) + for _, d := range testData { + err := encoder.Encode(d) + if err != nil { + panic(err) + } + } + return afero.NewReadOnlyFs(fs) +} + +func newTestProvider(conf Config) *Provider { + return NewProvider(testFs, conf) +} + +var _ = Describe("data", func() { + It("decoded well", func() { + data := data{ + Host: "ya.ru", + Method: "GET", + Uri: "/00", + Headers: map[string]string{"A": "a", "B": "b"}, + Tag: "tag", + } + req, err := data.ToRequest() + Expect(err).To(BeNil()) + Expect(*req).To(MatchFields(IgnoreExtras, Fields{ + "Proto": Equal("HTTP/1.1"), + "ProtoMajor": Equal(1), + "ProtoMinor": Equal(1), + "URL": PointTo(MatchFields(IgnoreExtras, Fields{ + "Scheme": Equal("http"), + "Host": Equal(data.Host), + "Path": Equal(data.Uri), + })), + "Header": Equal(http.Header{ + "A": []string{"a"}, + "B": []string{"b"}, + }), + "Method": Equal(data.Method), + })) + }) +}) + +var _ = Describe("provider start", func() { + It("ok", func() { + p := newTestProvider(Config{File: testFile}) + ctx, cancel := context.WithCancel(context.Background()) + errch := make(chan error) + go func() { errch <- p.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + cancel() + var err error + Eventually(errch).Should(Receive(&err)) + Expect(err).NotTo(HaveOccurred()) + }) + + 
It("fail", func() { + p := newTestProvider(Config{File: "no_such_file"}) + err := p.Run(context.Background(), core.ProviderDeps{}) + Expect(err).To(HaveOccurred()) + }) +}) + +var _ = Describe("provider decode", func() { + var ( + // Configured in BeforeEach. + conf Config // File always is testFile. + expectedStartErr error + successReceives int // How much ammo should be received. + + provider *Provider + cancel context.CancelFunc + errch chan error + ammos []*simple.Ammo + ) + + BeforeEach(func() { + conf = Config{} + expectedStartErr = nil + ammos = nil + successReceives = 0 + }) + + JustBeforeEach(func() { + conf.File = testFile + provider = newTestProvider(conf) + errch = make(chan error) + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go func() { errch <- provider.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + + for i := 0; i < successReceives; i++ { + am, ok := provider.Acquire() + Expect(ok).To(BeTrue()) + ammos = append(ammos, am.(*simple.Ammo)) + } + }, 1) + + AfterEach(func() { + _, ok := provider.Acquire() + Expect(ok).To(BeFalse(), "All ammo should be readed.") + defer cancel() + var err error + Eventually(errch).Should(Receive(&err)) + if expectedStartErr == nil { + Expect(err).To(BeNil()) + } else { + Expect(err).To(Equal(expectedStartErr)) + } + for i := 0; i < len(ammos); i++ { + expectedData := testData[i%len(testData)] + expectedReq, err := expectedData.ToRequest() + Expect(err).To(BeNil()) + ammo := ammos[i] + req, ss := ammo.Request() + Expect(*req).To(MatchFields(IgnoreExtras, Fields{ + "Proto": Equal(expectedReq.Proto), + "ProtoMajor": Equal(expectedReq.ProtoMajor), + "ProtoMinor": Equal(expectedReq.ProtoMinor), + "URL": PointTo(MatchFields(IgnoreExtras, Fields{ + "Scheme": Equal(expectedReq.URL.Scheme), + "Host": Equal(expectedReq.URL.Host), + "Path": Equal(expectedReq.URL.Path), + })), + "Header": Equal(expectedReq.Header), + "Method": Equal(expectedReq.Method), + "Body": Equal(expectedReq.Body), + })) + Expect(ss.Tags()).To(Equal(expectedData.Tag)) + } + }) + + Context("unlimited", func() { + BeforeEach(func() { + successReceives = 5 * len(testData) + }) + It("ok", func() { + cancel() + expectedStartErr = nil + Eventually(provider.Sink, time.Second, time.Millisecond).Should(BeClosed()) + }) + }) + + Context("limit set", func() { + BeforeEach(func() { + conf.Passes = 2 + successReceives = len(testData) * conf.Passes + }) + It("ok", func() { + }) + }) + + Context("passes set", func() { + BeforeEach(func() { + conf.Passes = 10 + conf.Limit = 5 + successReceives = conf.Limit + }) + It("ok", func() {}) + }) + +}) + +func Benchmark(b *testing.B) { + RegisterTestingT(b) + jsonDoc, err := json.Marshal(testData[0]) + Expect(err).To(BeNil()) + pool := sync.Pool{ + New: func() interface{} { return &simple.Ammo{} }, + } + b.Run("Decode", func(b *testing.B) { + for n := 0; n < b.N; n++ { + decodeAmmo(jsonDoc, &simple.Ammo{}) + } + }) + b.Run("DecodeWithPool", func(b *testing.B) { + for n := 0; n < b.N; n++ { + h := pool.Get().(*simple.Ammo) + decodeAmmo(jsonDoc, h) + pool.Put(h) + } + }) +} diff --git a/components/phttp/ammo/simple/jsonline/provider.go b/components/phttp/ammo/simple/jsonline/provider.go new file mode 100644 index 000000000..98810f4ec --- /dev/null +++ b/components/phttp/ammo/simple/jsonline/provider.go @@ -0,0 +1,91 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package jsonline + +import ( + "bufio" + "context" + "net/http" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/afero" + "github.com/yandex/pandora/components/phttp/ammo/simple" +) + +func NewProvider(fs afero.Fs, conf Config) *Provider { + var p Provider + p = Provider{ + Provider: simple.NewProvider(fs, conf.File, p.start), + Config: conf, + } + return &p +} + +type Provider struct { + simple.Provider + Config +} + +type Config struct { + File string `validate:"required"` + // Limit limits total num of ammo. Unlimited if zero. + Limit int `validate:"min=0"` + // Passes limits ammo file passes. Unlimited if zero. + Passes int `validate:"min=0"` +} + +func (p *Provider) start(ctx context.Context, ammoFile afero.File) error { + var ammoNum, passNum int + for { + passNum++ + scanner := bufio.NewScanner(ammoFile) + for line := 1; scanner.Scan() && (p.Limit == 0 || ammoNum < p.Limit); line++ { + data := scanner.Bytes() + a, err := decodeAmmo(data, p.Pool.Get().(*simple.Ammo)) + if err != nil { + return errors.Wrapf(err, "failed to decode ammo at line: %v; data: %q", line, data) + } + ammoNum++ + select { + case p.Sink <- a: + case <-ctx.Done(): + return nil + } + } + if p.Passes != 0 && passNum >= p.Passes { + break + } + ammoFile.Seek(0, 0) + } + return nil +} + +func decodeAmmo(jsonDoc []byte, am *simple.Ammo) (*simple.Ammo, error) { + var data data + err := data.UnmarshalJSON(jsonDoc) + if err != nil { + return nil, errors.WithStack(err) + } + req, err := data.ToRequest() + if err != nil { + return nil, err + } + am.Reset(req, data.Tag) + return am, nil +} + +func (d *data) ToRequest() (req *http.Request, err error) { + uri := "http://" + d.Host + d.Uri + req, err = http.NewRequest(d.Method, uri, strings.NewReader(d.Body)) + if err != nil { + return nil, errors.WithStack(err) + } + for k, v := range d.Headers { + req.Header.Set(k, v) + } + return +} diff --git a/components/phttp/ammo/simple/provider.go b/components/phttp/ammo/simple/provider.go new file mode 100644 index 000000000..cd4167e5f --- /dev/null +++ b/components/phttp/ammo/simple/provider.go @@ -0,0 +1,60 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
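A sketch of how the jsonline provider above is typically driven: `Run` decodes the file into the `Sink` until the limit, passes, or context end, while a consumer loops over `Acquire` and returns ammo with `Release`. The `main` wiring and the ammo file name are illustrative; in a real test the engine does this plumbing:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spf13/afero"

	"github.com/yandex/pandora/components/phttp/ammo/simple"
	"github.com/yandex/pandora/components/phttp/ammo/simple/jsonline"
	"github.com/yandex/pandora/core"
)

func main() {
	fs := afero.NewOsFs()
	p := jsonline.NewProvider(fs, jsonline.Config{
		File:   "ammo.jsonline", // hypothetical file path
		Passes: 1,               // read the file once; zero means loop over it forever
	})

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	go p.Run(ctx, core.ProviderDeps{}) // error handling omitted for brevity

	for {
		a, ok := p.Acquire() // ok is false once the Sink is closed
		if !ok {
			break
		}
		req, _ := a.(*simple.Ammo).Request()
		fmt.Println(req.Method, req.URL)
		p.Release(a) // return the ammo to the pool for reuse
	}
}
```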
+// Author: Vladimir Skipor + +package simple + +import ( + "context" + "sync" + + "github.com/pkg/errors" + "github.com/spf13/afero" + "go.uber.org/atomic" + + "github.com/yandex/pandora/core" +) + +func NewProvider(fs afero.Fs, fileName string, start func(ctx context.Context, file afero.File) error) Provider { + return Provider{ + fs: fs, + fileName: fileName, + start: start, + Sink: make(chan *Ammo, 128), + Pool: sync.Pool{New: func() interface{} { return &Ammo{} }}, + } +} + +type Provider struct { + fs afero.Fs + fileName string + start func(ctx context.Context, file afero.File) error + Sink chan *Ammo + Pool sync.Pool + idCounter atomic.Int64 + core.ProviderDeps +} + +func (p *Provider) Acquire() (core.Ammo, bool) { + ammo, ok := <-p.Sink + if ok { + ammo.SetId(int(p.idCounter.Inc() - 1)) + } + return ammo, ok +} + +func (p *Provider) Release(a core.Ammo) { + p.Pool.Put(a) +} + +func (p *Provider) Run(ctx context.Context, deps core.ProviderDeps) error { + p.ProviderDeps = deps + defer close(p.Sink) + file, err := p.fs.Open(p.fileName) + if err != nil { + return errors.Wrap(err, "failed to open ammo file") + } + defer file.Close() + return p.start(ctx, file) +} diff --git a/components/phttp/ammo/simple/raw/decoder.go b/components/phttp/ammo/simple/raw/decoder.go new file mode 100644 index 000000000..b6b7e63e5 --- /dev/null +++ b/components/phttp/ammo/simple/raw/decoder.go @@ -0,0 +1,58 @@ +package raw + +import ( + "bufio" + "bytes" + "net/http" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +type Header struct { + key string + value string +} + +func decodeHeader(headerString []byte) (reqSize int, tag string, err error) { + parts := strings.SplitN(string(headerString), " ", 2) + reqSize, err = strconv.Atoi(parts[0]) + if len(parts) > 1 { + tag = parts[1] + } + return +} + +func decodeRequest(reqString []byte) (req *http.Request, err error) { + req, err = http.ReadRequest(bufio.NewReader(bytes.NewReader(reqString))) + if err != nil { + return + } + if req.Host != "" { + req.URL.Host = req.Host + } + req.RequestURI = "" + return +} + +func decodeHTTPConfigHeaders(headers []string) (configHTTPHeaders []Header, err error) { + for _, header := range headers { + line := []byte(header) + if len(line) < 3 || line[0] != '[' || line[len(line)-1] != ']' { + return nil, errors.New("header line should be like '[key: value]") + } + line = line[1 : len(line)-1] + colonIdx := bytes.IndexByte(line, ':') + if colonIdx < 0 { + return nil, errors.New("missing colon") + } + configHTTPHeaders = append( + configHTTPHeaders, + Header{ + string(bytes.TrimSpace(line[:colonIdx])), + string(bytes.TrimSpace(line[colonIdx+1:])), + }) + } + return +} diff --git a/components/phttp/ammo/simple/raw/decoder_bench_test.go b/components/phttp/ammo/simple/raw/decoder_bench_test.go new file mode 100644 index 000000000..f622bfa1c --- /dev/null +++ b/components/phttp/ammo/simple/raw/decoder_bench_test.go @@ -0,0 +1,42 @@ +package raw + +import "testing" + +var ( + benchTestConfigHeaders = []string{ + "[Host: yourhost.tld]", + "[Sometest: someval]", + } +) + +const ( + benchTestRequest = "GET / HTTP/1.1\r\n" + + "Host: yourhost.tld" + + "Content-Length: 0\r\n" + + "\r\n" +) + +// BenchmarkRawDecoder-4 500000 2040 ns/op 5152 B/op 11 allocs/op +// BenchmarkRawDecoderWithHeaders-4 1000000 1944 ns/op 5168 B/op 12 allocs/op + +func BenchmarkRawDecoder(b *testing.B) { + for i := 0; i < b.N; i++ { + decodeRequest([]byte(benchTestRequest)) + } +} + +func BenchmarkRawDecoderWithHeaders(b *testing.B) { + b.StopTimer() + 
decodedHTTPConfigHeaders, _ := decodeHTTPConfigHeaders(benchTestConfigHeaders) + b.StartTimer() + for i := 0; i < b.N; i++ { + req, _ := decodeRequest([]byte(benchTestRequest)) + for _, header := range decodedHTTPConfigHeaders { + if header.key == "Host" { + req.URL.Host = header.value + } else { + req.Header.Set(header.key, header.value) + } + } + } +} diff --git a/components/phttp/ammo/simple/raw/decoder_test.go b/components/phttp/ammo/simple/raw/decoder_test.go new file mode 100644 index 000000000..9f8288306 --- /dev/null +++ b/components/phttp/ammo/simple/raw/decoder_test.go @@ -0,0 +1,116 @@ +package raw + +import ( + "bytes" + "io" + "net/http" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" +) + +var _ = Describe("Decoder", func() { + It("should parse header with tag", func() { + raw := "123 tag" + reqSize, tag, err := decodeHeader([]byte(raw)) + Expect(err).To(BeNil()) + Expect(reqSize).To(Equal(123)) + Expect(tag).To(Equal("tag")) + }) + It("should parse header without tag", func() { + raw := "123" + reqSize, tag, err := decodeHeader([]byte(raw)) + Expect(err).To(BeNil()) + Expect(reqSize).To(Equal(123)) + Expect(tag).To(Equal("")) + }) + It("should parse GET request", func() { + raw := "GET /some/path HTTP/1.0\r\n" + + "Host: foo.com\r\n" + + "Connection: close\r\n\r\n" + req, err := decodeRequest([]byte(raw)) + Expect(err).To(BeNil()) + Expect(*req.URL).To(MatchFields(IgnoreExtras, Fields{ + "Path": Equal("/some/path"), + "Scheme": BeEmpty(), + })) + Expect(req.RequestURI).To(Equal("")) + Expect(req.Host).To(Equal("foo.com")) + Expect(req.Header).To(Equal(http.Header{"Connection": []string{"close"}})) + }) + It("should parse POST request with body", func() { + raw := "POST /some/path HTTP/1.1\r\n" + + "Host: foo.com\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Foo: bar\r\n" + + "Content-Length: 9999\r\n\r\n" + // to be removed. 
+ "3\r\nfoo\r\n" + + "3\r\nbar\r\n" + + "0\r\n" + + "\r\n" + req, err := decodeRequest([]byte(raw)) + Expect(err).To(BeNil()) + Expect(*req.URL).To(MatchFields(IgnoreExtras, Fields{ + "Path": Equal("/some/path"), + "Scheme": BeEmpty(), + })) + Expect(req.RequestURI).To(Equal("")) + Expect(req.Host).To(Equal("foo.com")) + Expect(req.Header).To(Equal(http.Header{"Foo": []string{"bar"}})) + var bout bytes.Buffer + if req.Body != nil { + _, err := io.Copy(&bout, req.Body) + Expect(err).To(BeNil()) + req.Body.Close() + } + Expect(bout.String()).To(Equal("foobar")) + }) + It("should return error on bad urls", func() { + raw := "GET ../../../../etc/passwd HTTP/1.1\r\n" + + "Host: foo.com\r\n" + + "Content-Length: 0\r\n" + + "\r\n" + req, err := decodeRequest([]byte(raw)) + Expect(err).ToNot(BeNil()) + Expect(req).To(BeNil()) + }) + It("should replace header Host for URL if specified", func() { + raw := "GET /etc/passwd HTTP/1.1\r\n" + + "Host: hostname.tld\r\n" + + "Content-Length: 0\r\n" + + "\r\n" + req, err := decodeRequest([]byte(raw)) + Expect(err).To(BeNil()) + Expect(req.Host).To(Equal("hostname.tld")) + Expect(req.URL.Host).To(Equal("hostname.tld")) + }) + It("should replace header Host from config", func() { + const host = "hostname.tld" + const newhost = "newhostname.tld" + + raw := "GET / HTTP/1.1\r\n" + + "Host: " + host + "\r\n" + + "Content-Length: 0\r\n" + + "\r\n" + configHeaders := []string{ + "[Host: " + newhost + "]", + "[SomeTestKey: sometestvalue]", + } + req, err := decodeRequest([]byte(raw)) + Expect(err).To(BeNil()) + Expect(req.Host).To(Equal(host)) + Expect(req.URL.Host).To(Equal(host)) + decodedConfigHeaders, _ := decodeHTTPConfigHeaders(configHeaders) + for _, header := range decodedConfigHeaders { + // special behavior for `Host` header + if header.key == "Host" { + req.URL.Host = header.value + } else { + req.Header.Set(header.key, header.value) + } + } + Expect(req.URL.Host).To(Equal(newhost)) + Expect(req.Header.Get("SomeTestKey")).To(Equal("sometestvalue")) + }) +}) diff --git a/components/phttp/ammo/simple/raw/provider.go b/components/phttp/ammo/simple/raw/provider.go new file mode 100644 index 000000000..e1b66c6b0 --- /dev/null +++ b/components/phttp/ammo/simple/raw/provider.go @@ -0,0 +1,140 @@ +package raw + +import ( + "bufio" + "context" + "io" + + "github.com/pkg/errors" + "github.com/spf13/afero" + + "github.com/yandex/pandora/components/phttp/ammo/simple" +) + +/* +Parses size-prefixed HTTP ammo files. Each ammo is prefixed with a header line (delimited with \n), which consists of +two fields delimited by a space: ammo size and tag. Ammo size is in bytes (integer, including special characters like CR, LF). +Tag is a string. Example: + +77 bad +GET /abra HTTP/1.0 +Host: xxx.tanks.example.com +User-Agent: xxx (shell 1) + +904 +POST /upload/2 HTTP/1.0 +Content-Length: 801 +Host: xxxxxxxxx.dev.example.com +User-Agent: xxx (shell 1) + +^.^........W.j^1^.^.^.²..^^.i.^B.P..-!(.l/Y..V^. ...L?...S'NR.^^vm...3Gg@s...d'.\^.5N.$NF^,.Z^.aTE^. +._.[..k#L^ƨ`\RE.J.<.!,.q5.F^՚iΔĬq..^6..P..тH.`..i2 +.".uuzs^^F2...Rh.&.U.^^..J.P@.A......x..lǝy^?.u.p{4..g...m.,..R^.^.^......].^^.^J...p.ifTF0<.s.9V.o5<..%!6ļS.ƐǢ..㱋....C^&.....^.^y...v]^YT.1.#K.ibc...^.26... ..7. +b.$...j6.٨f...W.R7.^1.3....K`%.&^..d..{{ l0..^\..^X.g.^.r.(!.^^...4.1.$\ .%.8$(.n&..^^q.,.Q..^.D^.].^.R9.kE.^.$^.I..<..B^..^.h^^C.^E.|....3o^.@..Z.^.s.$[v. 
+527 +POST /upload/3 HTTP/1.0 +Content-Length: 424 +Host: xxxxxxxxx.dev.example.com +User-Agent: xxx (shell 1) + +^.^........QMO.0^.++^zJw.ر^$^.^Ѣ.^V.J....vM.8r&.T+...{@pk%~C.G../z顲^.7....l...-.^W"cR..... .&^?u.U^^.^.....{^.^..8.^.^.I.EĂ.p...'^.3.Tq..@R8....RAiBU..1.Bd*".7+. +.Ol.j=^.3..n....wp..,Wg.y^.T..~^.. +*/ + +func filePosition(file afero.File) (position int64) { + position, _ = file.Seek(0, io.SeekCurrent) + return +} + +type Config struct { + File string `validate:"required"` + // Limit limits total num of ammo. Unlimited if zero. + Limit int `validate:"min=0"` + // Redefine HTTP headers + Headers []string + // Passes limits ammo file passes. Unlimited if zero. + Passes int `validate:"min=0"` +} + +func NewProvider(fs afero.Fs, conf Config) *Provider { + var p Provider + p = Provider{ + Provider: simple.NewProvider(fs, conf.File, p.start), + Config: conf, + } + return &p +} + +type Provider struct { + simple.Provider + Config +} + +func (p *Provider) start(ctx context.Context, ammoFile afero.File) error { + var passNum int + var ammoNum int + // parse and prepare Headers from config + decodedConfigHeaders, err := decodeHTTPConfigHeaders(p.Config.Headers) + if err != nil { + return err + } + for { + passNum++ + reader := bufio.NewReader(ammoFile) + for p.Limit == 0 || ammoNum < p.Limit { + data, isPrefix, err := reader.ReadLine() + if isPrefix { + return errors.Errorf("too long header in ammo at position %v", filePosition(ammoFile)) + } + if err == io.EOF { + break // start over from the beginning + } + if err != nil { + return errors.Wrapf(err, "reading ammo failed at position: %v", filePosition(ammoFile)) + } + if len(data) == 0 { + continue // skip empty lines + } + reqSize, tag, err := decodeHeader(data) + if reqSize == 0 { + break // start over from the beginning of file if ammo size is 0 + } + buff := make([]byte, reqSize) + if n, err := io.ReadFull(reader, buff); err != nil { + return errors.Wrapf(err, "failed to read ammo at position: %v; tried to read: %v; have read: %v", filePosition(ammoFile), reqSize, n) + } + req, err := decodeRequest(buff) + if err != nil { + return errors.Wrapf(err, "failed to decode ammo at position: %v; data: %q", filePosition(ammoFile), buff) + } + + // redefine request Headers from config + for _, header := range decodedConfigHeaders { + // special behavior for `Host` header + if header.key == "Host" { + req.URL.Host = header.value + } else { + req.Header.Set(header.key, header.value) + } + } + + sh := p.Pool.Get().(*simple.Ammo) + sh.Reset(req, tag) + + select { + case p.Sink <- sh: + ammoNum++ + case <-ctx.Done(): + return ctx.Err() + } + } + if ammoNum == 0 { + return errors.New("no ammo in file") + } + if p.Passes != 0 && passNum >= p.Passes { + break + } + ammoFile.Seek(0, 0) + } + return nil +} diff --git a/components/phttp/ammo/simple/raw/provider_test.go b/components/phttp/ammo/simple/raw/provider_test.go new file mode 100644 index 000000000..547068f39 --- /dev/null +++ b/components/phttp/ammo/simple/raw/provider_test.go @@ -0,0 +1,184 @@ +package raw + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "github.com/spf13/afero" + + "github.com/yandex/pandora/components/phttp/ammo/simple" + "github.com/yandex/pandora/core" +) + +const testFile = "./ammo.stpd" +const testFileData = "../../../testdata/ammo.stpd" + +var testData = []ammoData{ + {"GET", "www.ya.ru", "/", http.Header{"Connection": []string{"close"}}, "", ""}, + {"GET", "www.ya.ru", "/test", http.Header{"Connection": []string{"close"}}, "", ""}, + {"GET", "www.ya.ru", "/test2", http.Header{"Connection": []string{"close"}}, "tag", ""}, + {"POST", "www.ya.ru", "/test3", http.Header{"Connection": []string{"close"}, "Content-Length": []string{"5"}}, "tag", "hello"}, +} + +type ammoData struct { + method string + host string + path string + header http.Header + tag string + body string +} + +var testFs = func() afero.Fs { + testFs := afero.NewOsFs() + testFileData, err := testFs.Open(testFileData) + if err != nil { + panic(err) + } + testDataBuffer, _ := ioutil.ReadAll(testFileData) + testFileData.Read(testDataBuffer) + fs := afero.NewMemMapFs() + err = afero.WriteFile(fs, testFile, testDataBuffer, 0) + if err != nil { + panic(err) + } + return afero.NewReadOnlyFs(fs) +}() + +func newTestProvider(conf Config) *Provider { + return NewProvider(testFs, conf) +} + +var _ = Describe("provider start", func() { + It("ok", func() { + p := newTestProvider(Config{File: testFile}) + ctx, cancel := context.WithCancel(context.Background()) + errch := make(chan error) + go func() { errch <- p.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + cancel() + var err error + Eventually(errch).Should(Receive(&err)) + Expect(err).To(Equal(ctx.Err())) + }) + + It("fail", func() { + p := newTestProvider(Config{File: "no_such_file"}) + Expect(p.Run(context.Background(), core.ProviderDeps{}), core.ProviderDeps{}).NotTo(BeNil()) + }) +}) +var _ = Describe("provider decode", func() { + var ( + // Configured in BeforeEach. + conf Config // File always is testFile. + expectedStartErr error + successReceives int // How much ammo should be received. 
+ + provider *Provider + cancel context.CancelFunc + errch chan error + ammos []*simple.Ammo + ) + + BeforeEach(func() { + conf = Config{} + expectedStartErr = nil + ammos = nil + successReceives = 0 + }) + + JustBeforeEach(func() { + conf.File = testFile + provider = newTestProvider(conf) + errch = make(chan error) + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go func() { errch <- provider.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + + for i := 0; i < successReceives; i++ { + By(fmt.Sprint(i)) + am, ok := provider.Acquire() + Expect(ok).To(BeTrue()) + ammos = append(ammos, am.(*simple.Ammo)) + } + }) + + AfterEach(func() { + _, ok := provider.Acquire() + Expect(ok).To(BeFalse(), "All ammo should be readed.") + defer cancel() + var err error + Eventually(errch).Should(Receive(&err)) + if expectedStartErr == nil { + Expect(err).To(BeNil()) + } else { + Expect(err).To(Equal(expectedStartErr)) + } + for i := 0; i < len(ammos); i++ { + expectedData := testData[i%len(testData)] + ammo := ammos[i] + req, ss := ammo.Request() + By(fmt.Sprintf("%v", i)) + Expect(*req).To(MatchFields(IgnoreExtras, Fields{ + "Method": Equal(expectedData.method), + "Proto": Equal("HTTP/1.1"), + "ProtoMajor": Equal(1), + "ProtoMinor": Equal(1), + "Host": Equal(expectedData.host), + "URL": PointTo(MatchFields(IgnoreExtras, Fields{ + "Scheme": BeEmpty(), + //"Host": Equal(expectedData.host), + "Path": Equal(expectedData.path), + })), + "Header": Equal(expectedData.header), + "RequestURI": Equal(""), + })) + Expect(ss.Tags()).To(Equal(expectedData.tag)) + var bout bytes.Buffer + if req.Body != nil { + _, err := io.Copy(&bout, req.Body) + Expect(err).To(BeNil()) + req.Body.Close() + } + Expect(bout.String()).To(Equal(expectedData.body)) + } + }) + + Context("unlimited", func() { + BeforeEach(func() { + successReceives = 5 * len(testData) + }) + It("ok", func() { + cancel() + expectedStartErr = context.Canceled + Eventually(provider.Sink, time.Second, time.Millisecond).Should(BeClosed()) + }) + }) + + Context("limit set", func() { + BeforeEach(func() { + conf.Passes = 2 + successReceives = len(testData) * conf.Passes + }) + It("ok", func() {}) + }) + + Context("passes set", func() { + BeforeEach(func() { + conf.Passes = 10 + conf.Limit = 5 + successReceives = conf.Limit + }) + It("ok", func() {}) + }) + +}) diff --git a/components/phttp/ammo/simple/raw/raw_suite_test.go b/components/phttp/ammo/simple/raw/raw_suite_test.go new file mode 100644 index 000000000..a49bf9d98 --- /dev/null +++ b/components/phttp/ammo/simple/raw/raw_suite_test.go @@ -0,0 +1,11 @@ +package raw + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestRaw(t *testing.T) { + ginkgoutil.RunSuite(t, "Raw Suite") +} diff --git a/components/phttp/ammo/simple/uri/decoder.go b/components/phttp/ammo/simple/uri/decoder.go new file mode 100644 index 000000000..5980fb466 --- /dev/null +++ b/components/phttp/ammo/simple/uri/decoder.go @@ -0,0 +1,143 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
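The raw provider above reads size-prefixed ammo: a `<size> <tag>` header line followed by exactly `size` bytes of a literal HTTP request. A self-contained sketch of that framing using only the standard library; the parsing mirrors, but does not reuse, the package's unexported helpers:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
)

// Illustrative framing: "<size> <tag>\n", then <size> raw bytes of an HTTP request.
const ammo = "56 bad\n" +
	"GET / HTTP/1.0\r\n" +
	"Host: example.com\r\n" +
	"Connection: close\r\n" +
	"\r\n"

func main() {
	r := bufio.NewReader(strings.NewReader(ammo))

	// Header line: request size in bytes, optionally followed by a tag.
	header, _ := r.ReadString('\n')
	parts := strings.SplitN(strings.TrimSpace(header), " ", 2)
	size, _ := strconv.Atoi(parts[0])
	tag := ""
	if len(parts) > 1 {
		tag = parts[1]
	}

	// Read exactly size bytes and parse them as a plain HTTP request.
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}
	req, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(buf)))
	if err != nil {
		panic(err)
	}
	fmt.Println(tag, req.Method, req.Host, req.URL.Path) // bad GET example.com /
}
```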
+// Author: Vladimir Skipor + +package uri + +import ( + "bytes" + "context" + "net/http" + "strings" + "sync" + + "github.com/pkg/errors" + + "github.com/yandex/pandora/components/phttp/ammo/simple" +) + +type decoder struct { + ctx context.Context + sink chan<- *simple.Ammo + pool *sync.Pool + + ammoNum int + header http.Header + configHeaders []ConfigHeader +} + +type ConfigHeader struct { + key string + value string +} + +func newDecoder(ctx context.Context, sink chan<- *simple.Ammo, pool *sync.Pool) *decoder { + return &decoder{ + sink: sink, + header: http.Header{}, + pool: pool, + ctx: ctx, + } +} + +func (d *decoder) Decode(line []byte) error { + // FIXME: rewrite decoder + // OPTIMIZE: reuse *http.Request, http.Header. Benchmark both variants. + if len(line) == 0 { + return errors.New("empty line") + } + line = bytes.TrimSpace(line) + switch line[0] { + case '/': + return d.decodeURI(line) + case '[': + return d.decodeHeader(line) + } + return errors.New("every line should begin with '[' or '/'") +} + +func (d *decoder) decodeURI(line []byte) error { + parts := strings.SplitN(string(line), " ", 2) + url := parts[0] + var tag string + if len(parts) > 1 { + tag = parts[1] + } + req, err := http.NewRequest("GET", string(url), nil) + if err != nil { + return errors.Wrap(err, "uri decode") + } + for k, v := range d.header { + // http.Request.Write sends Host header based on req.URL.Host + if k == "Host" { + req.Host = v[0] + req.URL.Host = v[0] + } else { + req.Header[k] = v + } + } + // redefine request Headers from config + for _, configHeader := range d.configHeaders { + if configHeader.key == "Host" { + req.Host = configHeader.value + req.URL.Host = configHeader.value + } else { + req.Header.Set(configHeader.key, configHeader.value) + } + } + + sh := d.pool.Get().(*simple.Ammo) + sh.Reset(req, tag) + select { + case d.sink <- sh: + d.ammoNum++ + return nil + case <-d.ctx.Done(): + return d.ctx.Err() + } +} + +func (d *decoder) decodeHeader(line []byte) error { + if len(line) < 3 || line[0] != '[' || line[len(line)-1] != ']' { + return errors.New("header line should be like '[key: value]") + } + line = line[1 : len(line)-1] + colonIdx := bytes.IndexByte(line, ':') + if colonIdx < 0 { + return errors.New("missing colon") + } + key := string(bytes.TrimSpace(line[:colonIdx])) + val := string(bytes.TrimSpace(line[colonIdx+1:])) + if key == "" { + return errors.New("missing header key") + } + d.header.Set(key, val) + return nil +} + +func (d *decoder) ResetHeader() { + for k := range d.header { + delete(d.header, k) + } +} + +func decodeHTTPConfigHeaders(headers []string) (configHTTPHeaders []ConfigHeader, err error) { + for _, header := range headers { + line := []byte(header) + if len(line) < 3 || line[0] != '[' || line[len(line)-1] != ']' { + return nil, errors.New("header line should be like '[key: value]") + } + line = line[1 : len(line)-1] + colonIdx := bytes.IndexByte(line, ':') + if colonIdx < 0 { + return nil, errors.New("missing colon") + } + preparedHeader := ConfigHeader{ + string(bytes.TrimSpace(line[:colonIdx])), + string(bytes.TrimSpace(line[colonIdx+1:])), + } + configHTTPHeaders = append(configHTTPHeaders, preparedHeader) + } + return +} diff --git a/components/phttp/ammo/simple/uri/decoder_test.go b/components/phttp/ammo/simple/uri/decoder_test.go new file mode 100644 index 000000000..37f6cf647 --- /dev/null +++ b/components/phttp/ammo/simple/uri/decoder_test.go @@ -0,0 +1,167 @@ +package uri + +import ( + "context" + "net/http" + "sync" + + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + + "github.com/yandex/pandora/components/phttp/ammo/simple" + "github.com/yandex/pandora/core" +) + +func newAmmoPool() *sync.Pool { + return &sync.Pool{New: func() interface{} { return &simple.Ammo{} }} + +} + +var _ = Describe("Decoder", func() { + It("uri decode ctx cancel", func() { + ctx, cancel := context.WithCancel(context.Background()) + decoder := newDecoder(ctx, make(chan *simple.Ammo), newAmmoPool()) + cancel() + err := decoder.Decode([]byte("/some/path")) + Expect(err).To(Equal(context.Canceled)) + }) + var ( + ammoCh chan *simple.Ammo + decoder *decoder + ) + BeforeEach(func() { + ammoCh = make(chan *simple.Ammo, 10) + decoder = newDecoder(context.Background(), ammoCh, newAmmoPool()) + }) + DescribeTable("invalid input", + func(line string) { + err := decoder.Decode([]byte(line)) + Expect(err).NotTo(BeNil()) + Expect(ammoCh).NotTo(Receive()) + Expect(decoder.header).To(BeEmpty()) + }, + Entry("empty line", ""), + Entry("line start", "test"), + Entry("empty header", "[ ]"), + Entry("no closing brace", "[key: val "), + Entry("no header key", "[ : val ]"), + Entry("no colon", "[ key val ]"), + ) + + Decode := func(line string) { + err := decoder.Decode([]byte(line)) + Expect(err).To(BeNil()) + } + It("uri", func() { + header := http.Header{"a": []string{"b"}, "c": []string{"d"}} + for k, v := range header { + decoder.header[k] = v + } + const host = "example.com" + decoder.header.Set("Host", host) + line := "/some/path" + Decode(line) + var am core.Ammo + Expect(ammoCh).To(Receive(&am)) + sh, ok := am.(*simple.Ammo) + Expect(ok).To(BeTrue()) + req, sample := sh.Request() + Expect(*req.URL).To(MatchFields(IgnoreExtras, Fields{ + "Path": Equal(line), + "Host": Equal(host), + "Scheme": BeEmpty(), + })) + Expect(req.Host).To(Equal(host)) + Expect(req.Header).To(Equal(header)) + header.Set("Host", host) + Expect(decoder.header).To(Equal(header)) + Expect(decoder.ammoNum).To(Equal(1)) + Expect(sample.Tags()).To(Equal("")) + }) + It("uri and tag", func() { + header := http.Header{"a": []string{"b"}, "c": []string{"d"}} + for k, v := range header { + decoder.header[k] = v + } + const host = "example.com" + decoder.header.Set("Host", host) + line := "/some/path some tag" + Decode(line) + var am core.Ammo + Expect(ammoCh).To(Receive(&am)) + sh, ok := am.(*simple.Ammo) + Expect(ok).To(BeTrue()) + req, sample := sh.Request() + Expect(*req.URL).To(MatchFields(IgnoreExtras, Fields{ + "Path": Equal("/some/path"), + "Host": Equal(host), + "Scheme": BeEmpty(), + })) + Expect(req.Host).To(Equal(host)) + Expect(req.Header).To(Equal(header)) + header.Set("Host", host) + Expect(decoder.header).To(Equal(header)) + Expect(decoder.ammoNum).To(Equal(1)) + Expect(sample.Tags()).To(Equal("some tag")) + }) + Context("header", func() { + AfterEach(func() { + Expect(decoder.ammoNum).To(BeZero()) + }) + It("overwrite", func() { + decoder.header.Set("A", "b") + Decode("[A: c]") + Expect(decoder.header).To(Equal(http.Header{ + "A": []string{"c"}, + })) + }) + It("add", func() { + decoder.header.Set("A", "b") + Decode("[C: d]") + Expect(decoder.header).To(Equal(http.Header{ + "A": []string{"b"}, + "C": []string{"d"}, + })) + }) + It("spaces", func() { + Decode(" [ C : d ] ") + Expect(decoder.header).To(Equal(http.Header{ + "C": []string{"d"}, + })) + }) + It("value colons", func() { + Decode("[C:c:d]") + Expect(decoder.header).To(Equal(http.Header{ + "C": []string{"c:d"}, + })) 
+ }) + It("empty value", func() { + Decode("[C:]") + Expect(decoder.header).To(Equal(http.Header{ + "C": []string{""}, + })) + }) + It("overwrite by config", func() { + decodedConfigHeaders, _ := decodeHTTPConfigHeaders([]string{ + "[Host: youhost.tld]", + "[SomeHeader: somevalue]", + }) + decoder.configHeaders = decodedConfigHeaders + cfgHeaders := []ConfigHeader{ + {"Host", "youhost.tld"}, + {"SomeHeader", "somevalue"}, + } + Expect(decoder.configHeaders).To(Equal(cfgHeaders)) + }) + }) + It("Reset", func() { + decoder.header.Set("a", "b") + decoder.ammoNum = 10 + decoder.ResetHeader() + Expect(decoder.header).To(BeEmpty()) + Expect(decoder.ammoNum).To(Equal(10)) + }) + +}) diff --git a/components/phttp/ammo/simple/uri/provider.go b/components/phttp/ammo/simple/uri/provider.go new file mode 100644 index 000000000..643cfe300 --- /dev/null +++ b/components/phttp/ammo/simple/uri/provider.go @@ -0,0 +1,77 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package uri + +import ( + "bufio" + "context" + + "github.com/pkg/errors" + "github.com/spf13/afero" + + "github.com/yandex/pandora/components/phttp/ammo/simple" +) + +type Config struct { + File string `validate:"required"` + // Limit limits total num of ammo. Unlimited if zero. + Limit int `validate:"min=0"` + // Redefine HTTP headers + Headers []string + // Passes limits ammo file passes. Unlimited if zero. + Passes int `validate:"min=0"` +} + +// TODO: pass logger and metricsRegistry +func NewProvider(fs afero.Fs, conf Config) *Provider { + var p Provider + p = Provider{ + Provider: simple.NewProvider(fs, conf.File, p.start), + Config: conf, + } + return &p +} + +type Provider struct { + simple.Provider + Config + + decoder *decoder // Initialized on start. +} + +func (p *Provider) start(ctx context.Context, ammoFile afero.File) error { + p.decoder = newDecoder(ctx, p.Sink, &p.Pool) + // parse and prepare Headers from config + decodedConfigHeaders, err := decodeHTTPConfigHeaders(p.Config.Headers) + if err != nil { + return err + } + p.decoder.configHeaders = decodedConfigHeaders + var passNum int + for { + passNum++ + scanner := bufio.NewScanner(ammoFile) + for line := 1; scanner.Scan() && (p.Limit == 0 || p.decoder.ammoNum < p.Limit); line++ { + data := scanner.Bytes() + if len(data) == 0 { + continue // skip empty lines + } + err := p.decoder.Decode(data) + if err != nil { + return errors.Wrapf(err, "failed to decode ammo at line: %v; data: %q", line, data) + } + } + if p.decoder.ammoNum == 0 { + return errors.New("no ammo in file") + } + if p.Passes != 0 && passNum >= p.Passes { + break + } + ammoFile.Seek(0, 0) + p.decoder.ResetHeader() + } + return nil +} diff --git a/components/phttp/ammo/simple/uri/provider_test.go b/components/phttp/ammo/simple/uri/provider_test.go new file mode 100644 index 000000000..a0d05411b --- /dev/null +++ b/components/phttp/ammo/simple/uri/provider_test.go @@ -0,0 +1,187 @@ +package uri + +import ( + "context" + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "github.com/spf13/afero" + + "github.com/pkg/errors" + "github.com/yandex/pandora/components/phttp/ammo/simple" + "github.com/yandex/pandora/core" +) + +const testFile = "./ammo.uri" +const testFileData = `/0 +[A:b] +/1 +[Host : example.com] +[ C : d ] +/2 +[A:] +[Host : other.net] + +/3 +/4 some tag +` + +var testData = []ammoData{ + {"", "/0", http.Header{}, ""}, + {"", "/1", http.Header{"A": []string{"b"}}, ""}, + {"example.com", "/2", http.Header{ + "A": []string{"b"}, + "C": []string{"d"}, + }, ""}, + {"other.net", "/3", http.Header{ + "A": []string{""}, + "C": []string{"d"}, + }, ""}, + {"other.net", "/4", http.Header{ + "A": []string{""}, + "C": []string{"d"}, + }, "some tag"}, +} + +type ammoData struct { + host string + path string + header http.Header + tag string +} + +var testFs = func() afero.Fs { + fs := afero.NewMemMapFs() + err := afero.WriteFile(fs, testFile, []byte(testFileData), 0) + if err != nil { + panic(err) + } + return afero.NewReadOnlyFs(fs) +}() + +func newTestProvider(conf Config) *Provider { + return NewProvider(testFs, conf) +} + +var _ = Describe("provider start", func() { + It("ok", func() { + p := newTestProvider(Config{File: testFile}) + ctx, cancel := context.WithCancel(context.Background()) + errch := make(chan error) + go func() { errch <- p.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + cancel() + var err error + Eventually(errch).Should(Receive(&err)) + Expect(errors.Cause(err)).To(Equal(ctx.Err())) + }) + + It("fail", func() { + p := newTestProvider(Config{File: "no_such_file"}) + Expect(p.Run(context.Background(), core.ProviderDeps{})).NotTo(BeNil()) + }) +}) +var _ = Describe("provider decode", func() { + var ( + // Configured in BeforeEach. + conf Config // File always is testFile. + expectedStartErr error + successReceives int // How much ammo should be received. 
+ + provider *Provider + cancel context.CancelFunc + errch chan error + ammos []*simple.Ammo + ) + + BeforeEach(func() { + conf = Config{} + expectedStartErr = nil + ammos = nil + successReceives = 0 + }) + + JustBeforeEach(func() { + conf.File = testFile + provider = newTestProvider(conf) + errch = make(chan error) + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go func() { errch <- provider.Run(ctx, core.ProviderDeps{}) }() + Expect(errch).NotTo(Receive()) + + for i := 0; i < successReceives; i++ { + By(fmt.Sprint(i)) + am, ok := provider.Acquire() + Expect(ok).To(BeTrue()) + ammos = append(ammos, am.(*simple.Ammo)) + } + }) + + AfterEach(func() { + _, ok := provider.Acquire() + Expect(ok).To(BeFalse(), "All ammo should be readed.") + defer cancel() + var err error + Eventually(errch).Should(Receive(&err)) + if expectedStartErr == nil { + Expect(err).NotTo(HaveOccurred()) + } else { + Expect(errors.Cause(err)).To(Equal(expectedStartErr)) + } + for i := 0; i < len(ammos); i++ { + expectedData := testData[i%len(testData)] + ammo := ammos[i] + req, ss := ammo.Request() + By(fmt.Sprintf("%v", i)) + Expect(*req).To(MatchFields(IgnoreExtras, Fields{ + "Method": Equal("GET"), + "Proto": Equal("HTTP/1.1"), + "ProtoMajor": Equal(1), + "ProtoMinor": Equal(1), + "Body": BeNil(), + "Host": Equal(expectedData.host), + "URL": PointTo(MatchFields(IgnoreExtras, Fields{ + "Scheme": BeEmpty(), + "Host": Equal(expectedData.host), + "Path": Equal(expectedData.path), + })), + "Header": Equal(expectedData.header), + })) + Expect(ss.Tags()).To(Equal(expectedData.tag)) + } + }) + + Context("unlimited", func() { + BeforeEach(func() { + successReceives = 5 * len(testData) + }) + It("ok", func() { + cancel() + expectedStartErr = context.Canceled + Eventually(provider.Sink, time.Second, time.Millisecond).Should(BeClosed()) + }) + }) + + Context("limit set", func() { + BeforeEach(func() { + conf.Passes = 2 + successReceives = len(testData) * conf.Passes + }) + It("ok", func() {}) + }) + + Context("passes set", func() { + BeforeEach(func() { + conf.Passes = 10 + conf.Limit = 5 + successReceives = conf.Limit + }) + It("ok", func() {}) + }) + +}) diff --git a/components/phttp/ammo/simple/uri/uri_suite_test.go b/components/phttp/ammo/simple/uri/uri_suite_test.go new file mode 100644 index 000000000..43d23b7b3 --- /dev/null +++ b/components/phttp/ammo/simple/uri/uri_suite_test.go @@ -0,0 +1,11 @@ +package uri + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestUri(t *testing.T) { + ginkgoutil.RunSuite(t, "Uri Suite") +} diff --git a/components/phttp/base.go b/components/phttp/base.go new file mode 100644 index 000000000..bf8eba67d --- /dev/null +++ b/components/phttp/base.go @@ -0,0 +1,185 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator/netsample" +) + +const ( + EmptyTag = "__EMPTY__" +) + +type BaseGunConfig struct { + AutoTag AutoTagConfig `config:"auto-tag"` +} + +// AutoTagConfig configure automatic tags generation based on ammo URI. First AutoTag URI path elements becomes tag. +// Example: /my/very/deep/page?id=23¶m=33 -> /my/very when uri-elements: 2. 
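A standalone illustration of the auto-tag rule described in the comment above; `firstPathElements` is a hypothetical re-implementation written for this example, not the package's unexported helper:

```go
package main

import (
	"fmt"
	"net/url"
)

// firstPathElements keeps at most n leading path elements of the URL path,
// mirroring the auto-tag rule described above (illustrative re-implementation).
func firstPathElements(n int, u *url.URL) string {
	path := u.Path
	i := 0
	for ; i < len(path); i++ {
		if path[i] == '/' {
			if n == 0 {
				break
			}
			n--
		}
	}
	return path[:i]
}

func main() {
	u, _ := url.Parse("http://host/my/very/deep/page?id=23&param=33")
	fmt.Println(firstPathElements(2, u)) // /my/very
}
```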
+type AutoTagConfig struct { + Enabled bool `config:"enabled"` + URIElements int `config:"uri-elements" validate:"min=1"` // URI elements used to autotagging + NoTagOnly bool `config:"no-tag-only"` // When true, autotagged only ammo that has no tag before. +} + +func DefaultBaseGunConfig() BaseGunConfig { + return BaseGunConfig{ + AutoTagConfig{ + Enabled: false, + URIElements: 2, + NoTagOnly: true, + }} +} + +type BaseGun struct { + DebugLog bool // Automaticaly set in Bind if Log accepts debug messages. + Config BaseGunConfig + Do func(r *http.Request) (*http.Response, error) // Required. + Connect func(ctx context.Context) error // Optional hook. + OnClose func() error // Optional. Called on Close(). + Aggregator netsample.Aggregator // Lazy set via BindResultTo. + core.GunDeps +} + +var _ Gun = (*BaseGun)(nil) +var _ io.Closer = (*BaseGun)(nil) + +func (b *BaseGun) Bind(aggregator netsample.Aggregator, deps core.GunDeps) error { + log := deps.Log + if ent := log.Check(zap.DebugLevel, "Gun bind"); ent != nil { + // Enable debug level logging during shooting. Creating log entries isn't free. + b.DebugLog = true + } + + if b.Aggregator != nil { + log.Panic("already binded") + } + if aggregator == nil { + log.Panic("nil aggregator") + } + b.Aggregator = aggregator + b.GunDeps = deps + + return nil +} + +// Shoot is thread safe iff Do and Connect hooks are thread safe. +func (b *BaseGun) Shoot(ammo Ammo) { + if b.Aggregator == nil { + zap.L().Panic("must bind before shoot") + } + if b.Connect != nil { + err := b.Connect(b.Ctx) + if err != nil { + b.Log.Warn("Connect fail", zap.Error(err)) + return + } + } + + req, sample := ammo.Request() + if b.DebugLog { + b.Log.Debug("Prepared ammo to shoot", zap.Stringer("url", req.URL)) + } + + if b.Config.AutoTag.Enabled && (!b.Config.AutoTag.NoTagOnly || sample.Tags() == "") { + sample.AddTag(autotag(b.Config.AutoTag.URIElements, req.URL)) + } + if sample.Tags() == "" { + sample.AddTag(EmptyTag) + } + + var err error + defer func() { + if err != nil { + sample.SetErr(err) + } + b.Aggregator.Report(sample) + err = errors.WithStack(err) + }() + + var res *http.Response + res, err = b.Do(req) + if err != nil { + b.Log.Warn("Request fail", zap.Error(err)) + return + } + + if b.DebugLog { + b.verboseLogging(res) + } + + sample.SetProtoCode(res.StatusCode) + defer res.Body.Close() + // TODO: measure body read time + _, err = io.Copy(ioutil.Discard, res.Body) // Buffers are pooled for ioutil.Discard + if err != nil { + b.Log.Warn("Body read fail", zap.Error(err)) + return + } +} + +func (b *BaseGun) Close() error { + if b.OnClose != nil { + return b.OnClose() + } + return nil +} + +func (b *BaseGun) verboseLogging(res *http.Response) { + if res.Request.Body != nil { + reqBody, err := ioutil.ReadAll(res.Request.Body) + if err != nil { + b.Log.Debug("Body read failed for verbose logging of Request") + } else { + b.Log.Debug("Request body", zap.ByteString("Body", reqBody)) + } + } + b.Log.Debug( + "Request debug info", + zap.String("URL", res.Request.URL.String()), + zap.String("Host", res.Request.Host), + zap.Any("Headers", res.Request.Header), + ) + + if res.Body != nil { + respBody, err := ioutil.ReadAll(res.Body) + if err != nil { + b.Log.Debug("Body read failed for verbose logging of Response") + } else { + b.Log.Debug("Response body", zap.ByteString("Body", respBody)) + } + } + b.Log.Debug( + "Response debug info", + zap.Int("Status Code", res.StatusCode), + zap.String("Status", res.Status), + zap.Any("Headers", res.Header), + ) +} + +func autotag(depth 
int, URL *url.URL) string { + path := URL.Path + var ind int + for ; ind < len(path); ind++ { + if path[ind] == '/' { + if depth == 0 { + break + } + depth-- + } + } + return path[:ind] +} diff --git a/components/phttp/base_test.go b/components/phttp/base_test.go new file mode 100644 index 000000000..1b28186f8 --- /dev/null +++ b/components/phttp/base_test.go @@ -0,0 +1,211 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor +package phttp + +import ( + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + + ammomock "github.com/yandex/pandora/components/phttp/mocks" + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator/netsample" + "github.com/yandex/pandora/core/coretest" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func testDeps() core.GunDeps { + return core.GunDeps{ + Log: ginkgoutil.NewLogger(), + Ctx: context.Background(), + } +} + +var _ = Describe("BaseGun", func() { + + var ( + base BaseGun + ) + BeforeEach(func() { + base = BaseGun{Config: DefaultBaseGunConfig()} + }) + + Context("BindResultTo", func() { + It("nil panics", func() { + Expect(func() { + base.Bind(nil, testDeps()) + }).To(Panic()) + }) + It("second time panics", func() { + res := &netsample.TestAggregator{} + base.Bind(res, testDeps()) + Expect(base.Aggregator).To(Equal(res)) + Expect(func() { + base.Bind(&netsample.TestAggregator{}, testDeps()) + }).To(Panic()) + }) + }) + + It("Shoot before bind panics", func() { + base.Do = func(*http.Request) (_ *http.Response, _ error) { + Fail("should not be called") + return + } + am := &ammomock.Ammo{} + am.On("Request").Return(nil, nil).Run( + func(mock.Arguments) { + Fail("should not be caled") + }) + Expect(func() { + base.Shoot(am) + }).To(Panic()) + }, 1) + + Context("Shoot", func() { + var ( + body io.ReadCloser + + am *ammomock.Ammo + req *http.Request + tag string + res *http.Response + sample *netsample.Sample + results *netsample.TestAggregator + shootErr error + ) + BeforeEach(func() { + am = &ammomock.Ammo{} + req = httptest.NewRequest("GET", "/1/2/3/4", nil) + tag = "" + results = &netsample.TestAggregator{} + base.Bind(results, testDeps()) + }) + + JustBeforeEach(func() { + sample = netsample.Acquire(tag) + am.On("Request").Return(req, sample) + res = &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(body), + Request: req, + } + base.Shoot(am) + Expect(results.Samples).To(HaveLen(1)) + shootErr = results.Samples[0].Err() + }) + + Context("Do ok", func() { + BeforeEach(func() { + body = ioutil.NopCloser(strings.NewReader("aaaaaaa")) + base.Do = func(doReq *http.Request) (*http.Response, error) { + Expect(doReq).To(Equal(req)) + return res, nil + } + }) + + It("ammo sample sent to results", func() { + Expect(results.Samples).To(HaveLen(1)) + Expect(results.Samples[0]).To(Equal(sample)) + Expect(sample.Tags()).To(Equal("__EMPTY__")) + Expect(sample.ProtoCode()).To(Equal(res.StatusCode)) + }) + It("body read well", func() { + Expect(shootErr).To(BeNil()) + _, err := body.Read([]byte{0}) + Expect(err).To(Equal(io.EOF), "body should be read fully") + }) + + Context("autotag options is set", func() { + BeforeEach(func() { base.Config.AutoTag.Enabled = true }) + It("autotagged", func() 
{ + Expect(sample.Tags()).To(Equal("/1/2")) + }) + + Context("tag is already set", func() { + const presetTag = "TAG" + BeforeEach(func() { tag = presetTag }) + It("no tag added", func() { + Expect(sample.Tags()).To(Equal(presetTag)) + }) + + Context("no-tag-only set to false", func() { + BeforeEach(func() { base.Config.AutoTag.NoTagOnly = false }) + It("autotag added", func() { + Expect(sample.Tags()).To(Equal(presetTag + "|/1/2")) + }) + }) + }) + }) + + Context("Connect set", func() { + var connectCalled, doCalled bool + BeforeEach(func() { + base.Connect = func(ctx context.Context) error { + connectCalled = true + return nil + } + oldDo := base.Do + base.Do = func(r *http.Request) (*http.Response, error) { + doCalled = true + return oldDo(r) + } + }) + It("Connect called", func() { + Expect(shootErr).To(BeNil()) + Expect(connectCalled).To(BeTrue()) + Expect(doCalled).To(BeTrue()) + }) + }) + Context("Connect failed", func() { + connectErr := errors.New("connect error") + BeforeEach(func() { + base.Connect = func(ctx context.Context) error { + // Connect should report fail in sample itself. + s := netsample.Acquire("") + s.SetErr(connectErr) + results.Report(s) + return connectErr + } + }) + It("Shoot failed", func() { + Expect(shootErr).NotTo(BeNil()) + Expect(shootErr).To(Equal(connectErr)) + }) + }) + }) + }) + + DescribeTable("autotag", + func(path string, depth int, tag string) { + URL := &url.URL{Path: path} + Expect(autotag(depth, URL)).To(Equal(tag)) + }, + Entry("empty", "", 2, ""), + Entry("root", "/", 2, "/"), + Entry("exact depth", "/1/2", 2, "/1/2"), + Entry("more depth", "/1/2", 3, "/1/2"), + Entry("less depth", "/1/2", 1, "/1"), + ) + + It("config decode", func() { + var conf BaseGunConfig + coretest.DecodeAndValidate(` +auto-tag: + enabled: true + uri-elements: 3 + no-tag-only: false +`, &conf) + }) +}) diff --git a/components/phttp/client.go b/components/phttp/client.go new file mode 100644 index 000000000..0aea86e07 --- /dev/null +++ b/components/phttp/client.go @@ -0,0 +1,168 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "go.uber.org/zap" + "golang.org/x/net/http2" + + "github.com/pkg/errors" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/lib/netutil" +) + +//go:generate mockery -name=Client -case=underscore -inpkg -testonly + +type Client interface { + Do(req *http.Request) (*http.Response, error) + CloseIdleConnections() // We should close idle conns after gun close. +} + +type ClientConfig struct { + Redirect bool // When true, follow HTTP redirects. + Dialer DialerConfig `config:"dial"` + Transport TransportConfig `config:",squash"` +} + +func DefaultClientConfig() ClientConfig { + return ClientConfig{ + Transport: DefaultTransportConfig(), + Dialer: DefaultDialerConfig(), + Redirect: false, + } +} + +// DialerConfig can be mapped on net.Dialer. +// Set net.Dialer for details. +type DialerConfig struct { + DNSCache bool `config:"dns-cache" map:"-"` + + Timeout time.Duration `config:"timeout"` + DualStack bool `config:"dual-stack"` + + // IPv4/IPv6 settings should not matter really, + // because target should be dialed using pre-resolved addr. 
+ FallbackDelay time.Duration `config:"fallback-delay"` + KeepAlive time.Duration `config:"keep-alive"` +} + +func DefaultDialerConfig() DialerConfig { + return DialerConfig{ + DNSCache: true, + DualStack: true, + Timeout: 3 * time.Second, + KeepAlive: 120 * time.Second, + } +} + +func NewDialer(conf DialerConfig) netutil.Dialer { + d := &net.Dialer{} + config.Map(d, conf) + if !conf.DNSCache { + return d + } + return netutil.NewDNSCachingDialer(d, netutil.DefaultDNSCache) +} + +// TransportConfig can be mapped on http.Transport. +// See http.Transport for details. +type TransportConfig struct { + TLSHandshakeTimeout time.Duration `config:"tls-handshake-timeout"` + DisableKeepAlives bool `config:"disable-keep-alives"` + DisableCompression bool `config:"disable-compression"` + MaxIdleConns int `config:"max-idle-conns"` + MaxIdleConnsPerHost int `config:"max-idle-conns-per-host"` + IdleConnTimeout time.Duration `config:"idle-conn-timeout"` + ResponseHeaderTimeout time.Duration `config:"response-header-timeout"` + ExpectContinueTimeout time.Duration `config:"expect-continue-timeout"` +} + +func DefaultTransportConfig() TransportConfig { + return TransportConfig{ + MaxIdleConns: 0, // No limit. + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 1 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + } +} + +func NewTransport(conf TransportConfig, dial netutil.DialerFunc) *http.Transport { + tr := &http.Transport{} + tr.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, // We should not spend time for this stuff. + NextProtos: []string{"http/1.1"}, // Disable HTTP/2. Use HTTP/2 transport explicitly, if needed. + } + config.Map(tr, conf) + tr.DialContext = dial + return tr +} + +func NewHTTP2Transport(conf TransportConfig, dial netutil.DialerFunc) *http.Transport { + tr := NewTransport(conf, dial) + err := http2.ConfigureTransport(tr) + if err != nil { + zap.L().Panic("HTTP/2 transport configure fail", zap.Error(err)) + } + tr.TLSClientConfig.NextProtos = []string{"h2"} + return tr +} + +func newClient(tr *http.Transport, redirect bool) Client { + if redirect { + return redirectClient{&http.Client{Transport: tr}} + } + return noRedirectClient{tr} +} + +type redirectClient struct{ *http.Client } + +func (c redirectClient) CloseIdleConnections() { + c.Transport.(*http.Transport).CloseIdleConnections() +} + +type noRedirectClient struct{ *http.Transport } + +func (c noRedirectClient) Do(req *http.Request) (*http.Response, error) { + return c.Transport.RoundTrip(req) +} + +// Used to cancel shooting in HTTP/2 gun, when target doesn't support HTTP/2 +type panicOnHTTP1Client struct { + Client +} + +const notHTTP2PanicMsg = "Non HTTP/2 connection established. Seems that target doesn't support HTTP/2." 
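The exported constructors above compose into a working HTTP/1.1 client. A sketch, assuming `netutil.DialerFunc` matches the `DialContext` signature used elsewhere in this diff; `RoundTrip` is called directly to mirror the no-redirect client:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/yandex/pandora/components/phttp"
)

func main() {
	// Build a transport the same way the gun does: cached-DNS dialer plus the
	// package defaults (keep-alives on, compression off, HTTP/1.1 only).
	dialer := phttp.NewDialer(phttp.DefaultDialerConfig())
	tr := phttp.NewTransport(phttp.DefaultTransportConfig(), dialer.DialContext)

	req, _ := http.NewRequest("GET", "http://example.com/", nil)

	// RoundTrip does not follow redirects, like the no-redirect client above.
	res, err := tr.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```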
+ +func (c *panicOnHTTP1Client) Do(req *http.Request) (*http.Response, error) { + res, err := c.Client.Do(req) + if err != nil { + return nil, err + } + err = checkHTTP2(res.TLS) + if err != nil { + zap.L().Panic(notHTTP2PanicMsg, zap.Error(err)) + } + return res, nil +} + +func checkHTTP2(state *tls.ConnectionState) error { + if state == nil { + return errors.New("http2: non TLS connection") + } + if p := state.NegotiatedProtocol; p != http2.NextProtoTLS { + return errors.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2.NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return errors.New("http2: could not negotiate protocol mutually") + } + return nil +} diff --git a/components/phttp/connect.go b/components/phttp/connect.go new file mode 100644 index 000000000..bac6a8b5f --- /dev/null +++ b/components/phttp/connect.go @@ -0,0 +1,148 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "bufio" + "context" + "crypto/tls" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/pkg/errors" + "github.com/yandex/pandora/lib/netutil" +) + +type ConnectGunConfig struct { + Target string `validate:"endpoint,required"` + ConnectSSL bool `config:"connect-ssl"` // Defines if tunnel encrypted. + SSL bool // As in HTTP gun, defines scheme for http requests. + Client ClientConfig `config:",squash"` + BaseGunConfig `config:",squash"` +} + +func NewConnectGun(conf ConnectGunConfig) *ConnectGun { + scheme := "http" + if conf.SSL { + scheme = "https" + } + client := newConnectClient(conf) + var g ConnectGun + g = ConnectGun{ + BaseGun: BaseGun{ + Config: conf.BaseGunConfig, + Do: g.Do, + OnClose: func() error { + client.CloseIdleConnections() + return nil + }, + }, + scheme: scheme, + client: client, + } + return &g +} + +type ConnectGun struct { + BaseGun + scheme string + client Client +} + +var _ Gun = (*ConnectGun)(nil) + +func (g *ConnectGun) Do(req *http.Request) (*http.Response, error) { + req.URL.Scheme = g.scheme + return g.client.Do(req) +} + +func DefaultConnectGunConfig() ConnectGunConfig { + return ConnectGunConfig{ + SSL: false, + ConnectSSL: false, + Client: DefaultClientConfig(), + } +} + +func newConnectClient(conf ConnectGunConfig) Client { + transport := NewTransport(conf.Client.Transport, + newConnectDialFunc( + conf.Target, + conf.ConnectSSL, + NewDialer(conf.Client.Dialer), + )) + return newClient(transport, conf.Client.Redirect) +} + +func newConnectDialFunc(target string, connectSSL bool, dialer netutil.Dialer) netutil.DialerFunc { + return func(ctx context.Context, network, address string) (conn net.Conn, err error) { + // TODO(skipor): make connect sample. + // TODO(skipor): make httptrace callbacks called correctly. + if network != "tcp" { + panic("unsupported network " + network) + } + defer func() { + if err != nil && conn != nil { + conn.Close() + conn = nil + } + }() + conn, err = dialer.DialContext(ctx, "tcp", target) + if err != nil { + err = errors.WithStack(err) + return + } + if connectSSL { + conn = tls.Client(conn, &tls.Config{InsecureSkipVerify: true}) + } + req := &http.Request{ + Method: "CONNECT", + URL: &url.URL{}, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Body: nil, + Host: address, + } + // NOTE(skipor): any logic for CONNECT request can be easily added via hooks. 
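The three knobs in `ConnectGunConfig` are easy to mix up: `Target` is the CONNECT proxy itself, `ConnectSSL` controls TLS towards that proxy, and `SSL` only selects the scheme of the tunnelled ammo requests. A short sketch of wiring the gun to a proxy (the proxy address below is a made-up placeholder):

```go
package main

import (
	"github.com/yandex/pandora/components/phttp"
)

func main() {
	conf := phttp.DefaultConnectGunConfig()
	conf.Target = "proxy.example.net:3128" // hypothetical CONNECT proxy, not the origin host
	conf.ConnectSSL = false                // plain TCP to the proxy itself
	conf.SSL = false                       // tunnelled ammo requests use http://
	_ = phttp.NewConnectGun(conf)
}
```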
+ err = req.Write(conn) + if err != nil { + err = errors.WithStack(err) + return + } + // NOTE(skipor): according to RFC 2817 we can send origin at that moment and not wait + // for request. That requires to wrap conn and do following logic at first read. + r := bufio.NewReader(conn) + res, err := http.ReadResponse(r, req) + if err != nil { + err = errors.WithStack(err) + return + } + // RFC 7230 3.3.3.2: Any 2xx (Successful) response to a CONNECT request implies that + // the connection will become a tunnel immediately after the empty + // line that concludes the header fields. A client MUST ignore any + // Content-Length or Transfer-Encoding header fields received in + // such a message. + if res.StatusCode != http.StatusOK { + dump, dumpErr := httputil.DumpResponse(res, false) + err = errors.Errorf("Unexpected status code. Dumped response:\n%s\n Dump error: %s", + dump, dumpErr) + return + } + // No need to close body. + if r.Buffered() != 0 { + // Already receive something non HTTP from proxy or dialed server. + // Anyway it is incorrect situation. + peek, _ := r.Peek(r.Buffered()) + err = errors.Errorf("Unexpected extra data after connect: %q", peek) + return + } + return + } +} diff --git a/components/phttp/connect_test.go b/components/phttp/connect_test.go new file mode 100644 index 000000000..39339e6d5 --- /dev/null +++ b/components/phttp/connect_test.go @@ -0,0 +1,101 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/yandex/pandora/core/aggregator/netsample" +) + +var _ = Describe("connect", func() { + tunnelHandler := func(originURL string) http.Handler { + u, err := url.Parse(originURL) + Expect(err).To(BeNil()) + originHost := u.Host + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer GinkgoRecover() + Expect(originHost).To(Equal(r.RequestURI)) + toOrigin, err := net.Dial("tcp", originHost) + Expect(err).To(BeNil()) + conn, bufReader, err := w.(http.Hijacker).Hijack() + Expect(err).To(BeNil()) + Expect(bufReader.Reader.Buffered()).To(BeZero(), + "Current implementation should not send requested data before got response.") + _, err = io.WriteString(conn, "HTTP/1.1 200 Connection established\r\n\r\n") + Expect(err).To(BeNil()) + go func() { io.Copy(toOrigin, conn) }() + go func() { io.Copy(conn, toOrigin) }() + }) + } + + testClient := func(tunnelSSL bool) func() { + return func() { + origin := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusOK) + })) + defer origin.Close() + + var proxy *httptest.Server + if tunnelSSL { + proxy = httptest.NewTLSServer(tunnelHandler(origin.URL)) + } else { + proxy = httptest.NewServer(tunnelHandler(origin.URL)) + } + defer proxy.Close() + + req, err := http.NewRequest("GET", origin.URL, nil) + Expect(err).To(BeNil()) + + conf := DefaultConnectGunConfig() + conf.ConnectSSL = tunnelSSL + scheme := "http://" + if tunnelSSL { + scheme = "https://" + } + conf.Target = strings.TrimPrefix(proxy.URL, scheme) + + client := newConnectClient(conf) + + res, err := client.Do(req) + Expect(err).To(BeNil()) + Expect(res.StatusCode).To(Equal(http.StatusOK)) + } + } + + It("HTTP client", testClient(false)) + It("HTTPS client", testClient(true)) + + 
It("gun", func() { + origin := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusOK) + })) + defer origin.Close() + proxy := httptest.NewServer(tunnelHandler(origin.URL)) + defer proxy.Close() + + conf := DefaultConnectGunConfig() + conf.Target = proxy.Listener.Addr().String() + connectGun := NewConnectGun(conf) + + results := &netsample.TestAggregator{} + connectGun.Bind(results, testDeps()) + + connectGun.Shoot(newAmmoURL(origin.URL)) + Expect(results.Samples[0].Err()).To(BeNil()) + + Expect(results.Samples).To(HaveLen(1)) + Expect(results.Samples[0].ProtoCode()).To(Equal(http.StatusOK)) + }) +}) diff --git a/components/phttp/core.go b/components/phttp/core.go new file mode 100644 index 000000000..0615255ed --- /dev/null +++ b/components/phttp/core.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "net/http" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator/netsample" +) + +//go:generate mockery -name=Ammo -case=underscore -outpkg=ammomock + +// Ammo ammo interface for http based guns. +// http ammo providers should produce ammo that implements Ammo. +// http guns should use convert ammo to Ammo, not to specific implementation. +// Returned request have +type Ammo interface { + // TODO(skipor): instead of sample use it wrapper with httptrace and more usable interface. + Request() (*http.Request, *netsample.Sample) + // Id unique ammo id. Usually equals to ammo num got from provider. + Id() int +} + +type Gun interface { + Shoot(ammo Ammo) + Bind(sample netsample.Aggregator, deps core.GunDeps) error +} + +func WrapGun(g Gun) core.Gun { + if g == nil { + return nil + } + return &gunWrapper{g} +} + +type gunWrapper struct{ Gun } + +func (g *gunWrapper) Shoot(ammo core.Ammo) { + g.Gun.Shoot(ammo.(Ammo)) +} + +func (g *gunWrapper) Bind(a core.Aggregator, deps core.GunDeps) error { + return g.Gun.Bind(netsample.UnwrapAggregator(a), deps) +} diff --git a/components/phttp/doc.go b/components/phttp/doc.go new file mode 100644 index 000000000..067925107 --- /dev/null +++ b/components/phttp/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// package phttp (pandora http) contains pandora extension points for HTTP related protocols. +package phttp diff --git a/components/phttp/http.go b/components/phttp/http.go new file mode 100644 index 000000000..c057b49f6 --- /dev/null +++ b/components/phttp/http.go @@ -0,0 +1,108 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package phttp + +import ( + "net/http" + + "github.com/pkg/errors" +) + +type ClientGunConfig struct { + Target string `validate:"endpoint,required"` + SSL bool + Base BaseGunConfig `config:",squash"` +} + +type HTTPGunConfig struct { + Gun ClientGunConfig `config:",squash"` + Client ClientConfig `config:",squash"` +} + +type HTTP2GunConfig struct { + Gun ClientGunConfig `config:",squash"` + Client ClientConfig `config:",squash"` +} + +func NewHTTPGun(conf HTTPGunConfig) *HTTPGun { + transport := NewTransport(conf.Client.Transport, NewDialer(conf.Client.Dialer).DialContext) + client := newClient(transport, conf.Client.Redirect) + return NewClientGun(client, conf.Gun) +} + +// NewHTTP2Gun return simple HTTP/2 gun that can shoot sequentially through one connection. +func NewHTTP2Gun(conf HTTP2GunConfig) (*HTTPGun, error) { + if !conf.Gun.SSL { + // Open issue on github if you really need this feature. + return nil, errors.New("HTTP/2.0 over TCP is not supported. Please leave SSL option true by default.") + } + transport := NewHTTP2Transport(conf.Client.Transport, NewDialer(conf.Client.Dialer).DialContext) + client := newClient(transport, conf.Client.Redirect) + // Will panic and cancel shooting whet target doesn't support HTTP/2. + client = &panicOnHTTP1Client{client} + return NewClientGun(client, conf.Gun), nil +} + +func NewClientGun(client Client, conf ClientGunConfig) *HTTPGun { + scheme := "http" + if conf.SSL { + scheme = "https" + } + var g HTTPGun + g = HTTPGun{ + BaseGun: BaseGun{ + Config: conf.Base, + Do: g.Do, + OnClose: func() error { + client.CloseIdleConnections() + return nil + }, + }, + scheme: scheme, + target: conf.Target, + client: client, + } + return &g +} + +type HTTPGun struct { + BaseGun + scheme string + target string + client Client +} + +var _ Gun = (*HTTPGun)(nil) + +func (g *HTTPGun) Do(req *http.Request) (*http.Response, error) { + req.Host = req.URL.Host + req.URL.Host = g.target + req.URL.Scheme = g.scheme + return g.client.Do(req) +} + +func DefaultHTTPGunConfig() HTTPGunConfig { + return HTTPGunConfig{ + Gun: DefaultClientGunConfig(), + Client: DefaultClientConfig(), + } +} + +func DefaultHTTP2GunConfig() HTTP2GunConfig { + conf := HTTP2GunConfig{ + Client: DefaultClientConfig(), + Gun: DefaultClientGunConfig(), + } + conf.Gun.SSL = true + return conf +} + +func DefaultClientGunConfig() ClientGunConfig { + return ClientGunConfig{ + SSL: false, + Base: DefaultBaseGunConfig(), + } +} diff --git a/components/phttp/http_test.go b/components/phttp/http_test.go new file mode 100644 index 000000000..3ffa0db23 --- /dev/null +++ b/components/phttp/http_test.go @@ -0,0 +1,237 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptest" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "go.uber.org/atomic" + "go.uber.org/zap" + "golang.org/x/net/http2" + + ammomock "github.com/yandex/pandora/components/phttp/mocks" + "github.com/yandex/pandora/core/aggregator/netsample" + "github.com/yandex/pandora/core/config" +) + +var _ = Describe("BaseGun", func() { + It("GunClientConfig decode", func() { + conf := DefaultHTTPGunConfig() + data := map[interface{}]interface{}{ + "target": "test-trbo01e.haze.yandex.net:3000", + } + err := config.DecodeAndValidate(data, &conf) + Expect(err).To(BeNil()) + }) + + It("integration", func() { + const host = "example.com" + const path = "/smth" + expectedReq, err := http.NewRequest("GET", "http://"+host+path, nil) + expectedReq.Host = "" // Important. Ammo may have empty host. + Expect(err).To(BeNil()) + var actualReq *http.Request + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(http.StatusOK) + actualReq = req + })) + defer server.Close() + conf := DefaultHTTPGunConfig() + conf.Gun.Target = strings.TrimPrefix(server.URL, "http://") + results := &netsample.TestAggregator{} + httpGun := NewHTTPGun(conf) + httpGun.Bind(results, testDeps()) + + am := newAmmoReq(expectedReq) + httpGun.Shoot(am) + Expect(results.Samples[0].Err()).To(BeNil()) + + Expect(*actualReq).To(MatchFields(IgnoreExtras, Fields{ + "Method": Equal("GET"), + "Proto": Equal("HTTP/1.1"), + "Host": Equal(host), // Not server host, but host from ammo. + "URL": PointTo(MatchFields(IgnoreExtras, Fields{ + "Host": BeEmpty(), // Server request. + "Path": Equal(path), + })), + })) + }) + +}) + +func newAmmoURL(url string) Ammo { + req, err := http.NewRequest("GET", url, nil) + Expect(err).NotTo(HaveOccurred()) + return newAmmoReq(req) +} + +func newAmmoReq(req *http.Request) Ammo { + ammo := &ammomock.Ammo{} + ammo.On("Request").Return(req, netsample.Acquire("REQUEST")) + return ammo +} + +var _ = Describe("HTTP", func() { + itOk := func(https bool) { + var isServed atomic.Bool + server := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + Expect(req.Header.Get("Accept-Encoding")).To(BeEmpty()) + rw.WriteHeader(http.StatusOK) + isServed.Store(true) + })) + if https { + server.StartTLS() + } else { + server.Start() + } + defer server.Close() + conf := DefaultHTTPGunConfig() + conf.Gun.Target = server.Listener.Addr().String() + conf.Gun.SSL = https + gun := NewHTTPGun(conf) + var aggr netsample.TestAggregator + gun.Bind(&aggr, testDeps()) + gun.Shoot(newAmmoURL("/")) + + Expect(aggr.Samples).To(HaveLen(1)) + Expect(aggr.Samples[0].ProtoCode()).To(Equal(http.StatusOK)) + Expect(isServed.Load()).To(BeTrue()) + } + It("http ok", func() { itOk(false) }) + It("https ok", func() { itOk(true) }) + + itRedirect := func(redirect bool) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/redirect" { + rw.Header().Add("Location", "/") + rw.WriteHeader(http.StatusMovedPermanently) + } else { + rw.WriteHeader(http.StatusOK) + } + })) + defer server.Close() + conf := DefaultHTTPGunConfig() + conf.Gun.Target = server.Listener.Addr().String() + conf.Client.Redirect = redirect + gun := NewHTTPGun(conf) + var aggr netsample.TestAggregator + gun.Bind(&aggr, testDeps()) + gun.Shoot(newAmmoURL("/redirect")) + + Expect(aggr.Samples).To(HaveLen(1)) + expectedCode := http.StatusMovedPermanently + if redirect { + expectedCode = http.StatusOK + } + 
Expect(aggr.Samples[0].ProtoCode()).To(Equal(expectedCode)) + } + It("not follow redirects by default", func() { itRedirect(false) }) + It("follow redirects if option set ", func() { itRedirect(true) }) + + It("not support HTTP2", func() { + server := newHTTP2TestServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if isHTTP2Request(req) { + rw.WriteHeader(http.StatusForbidden) + } else { + rw.WriteHeader(http.StatusOK) + } + })) + defer server.Close() + + // Test, that configured server serves HTTP2 well. + http2OnlyClient := http.Client{ + Transport: &http2.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }} + res, err := http2OnlyClient.Get(server.URL) + Expect(err).NotTo(HaveOccurred()) + Expect(res.StatusCode).To(Equal(http.StatusForbidden)) + + conf := DefaultHTTPGunConfig() + conf.Gun.Target = server.Listener.Addr().String() + conf.Gun.SSL = true + gun := NewHTTPGun(conf) + var results netsample.TestAggregator + gun.Bind(&results, testDeps()) + gun.Shoot(newAmmoURL("/")) + + Expect(results.Samples).To(HaveLen(1)) + Expect(results.Samples[0].ProtoCode()).To(Equal(http.StatusOK)) + }) + +}) + +var _ = Describe("HTTP/2", func() { + It("HTTP/2 ok", func() { + server := newHTTP2TestServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if isHTTP2Request(req) { + rw.WriteHeader(http.StatusOK) + } else { + rw.WriteHeader(http.StatusForbidden) + } + })) + defer server.Close() + conf := DefaultHTTP2GunConfig() + conf.Gun.Target = server.Listener.Addr().String() + gun, _ := NewHTTP2Gun(conf) + var results netsample.TestAggregator + gun.Bind(&results, testDeps()) + gun.Shoot(newAmmoURL("/")) + Expect(results.Samples[0].ProtoCode()).To(Equal(http.StatusOK)) + }) + + It("HTTP/1.1 panic", func() { + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + zap.S().Info("Served") + })) + defer server.Close() + conf := DefaultHTTP2GunConfig() + conf.Gun.Target = server.Listener.Addr().String() + gun, _ := NewHTTP2Gun(conf) + var results netsample.TestAggregator + gun.Bind(&results, testDeps()) + var r interface{} + func() { + defer func() { + r = recover() + }() + gun.Shoot(newAmmoURL("/")) + }() + Expect(r).NotTo(BeNil()) + Expect(r).To(ContainSubstring(notHTTP2PanicMsg)) + }) + + It("no SSL construction fails", func() { + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + zap.S().Info("Served") + })) + defer server.Close() + conf := DefaultHTTP2GunConfig() + conf.Gun.Target = server.Listener.Addr().String() + conf.Gun.SSL = false + _, err := NewHTTP2Gun(conf) + Expect(err).To(HaveOccurred()) + }) + +}) + +func isHTTP2Request(req *http.Request) bool { + return checkHTTP2(req.TLS) == nil +} + +func newHTTP2TestServer(handler http.Handler) *httptest.Server { + server := httptest.NewUnstartedServer(handler) + http2.ConfigureServer(server.Config, nil) + server.TLS = server.Config.TLSConfig // StartTLS takes TLS configuration from that field. + server.StartTLS() + return server +} diff --git a/components/phttp/import/import.go b/components/phttp/import/import.go new file mode 100644 index 000000000..3f2d5e1a2 --- /dev/null +++ b/components/phttp/import/import.go @@ -0,0 +1,88 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package phttp + +import ( + "net" + + "github.com/spf13/afero" + "go.uber.org/zap" + + . "github.com/yandex/pandora/components/phttp" + "github.com/yandex/pandora/components/phttp/ammo/simple/jsonline" + "github.com/yandex/pandora/components/phttp/ammo/simple/raw" + "github.com/yandex/pandora/components/phttp/ammo/simple/uri" + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/register" + "github.com/yandex/pandora/lib/netutil" +) + +func Import(fs afero.Fs) { + register.Provider("http/json", func(conf jsonline.Config) core.Provider { + return jsonline.NewProvider(fs, conf) + }) + + register.Provider("uri", func(conf uri.Config) core.Provider { + return uri.NewProvider(fs, conf) + }) + + register.Provider("raw", func(conf raw.Config) core.Provider { + return raw.NewProvider(fs, conf) + }) + + register.Gun("http", func(conf HTTPGunConfig) func() core.Gun { + preResolveTargetAddr(&conf.Client, &conf.Gun.Target) + return func() core.Gun { return WrapGun(NewHTTPGun(conf)) } + }, DefaultHTTPGunConfig) + + register.Gun("http2", func(conf HTTP2GunConfig) func() (core.Gun, error) { + preResolveTargetAddr(&conf.Client, &conf.Gun.Target) + return func() (core.Gun, error) { + gun, err := NewHTTP2Gun(conf) + return WrapGun(gun), err + } + }, DefaultHTTP2GunConfig) + + register.Gun("connect", func(conf ConnectGunConfig) func() core.Gun { + preResolveTargetAddr(&conf.Client, &conf.Target) + return func() core.Gun { + return WrapGun(NewConnectGun(conf)) + } + }, DefaultConnectGunConfig) +} + +// DNS resolve optimisation. +// When DNSCache turned off - do nothing extra, host will be resolved on every shoot. +// When using resolved target, don't use DNS caching logic - it is useless. +// If we can resolve accessible target addr - use it as target, not use caching. +// Otherwise just use DNS cache - we should not fail shooting, we should try to +// connect on every shoot. DNS cache will save resolved addr after first successful connect. +func preResolveTargetAddr(clientConf *ClientConfig, target *string) (err error) { + if !clientConf.Dialer.DNSCache { + return + } + if endpointIsResolved(*target) { + clientConf.Dialer.DNSCache = false + return + } + resolved, err := netutil.LookupReachable(*target) + if err != nil { + zap.L().Warn("DNS target pre resolve failed", + zap.String("target", *target), zap.Error(err)) + return + } + clientConf.Dialer.DNSCache = false + *target = resolved + return +} + +func endpointIsResolved(endpoint string) bool { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return false + } + return net.ParseIP(host) != nil +} diff --git a/components/phttp/import/import_suite_test.go b/components/phttp/import/import_suite_test.go new file mode 100644 index 000000000..3a0b5a844 --- /dev/null +++ b/components/phttp/import/import_suite_test.go @@ -0,0 +1,72 @@ +package phttp + +import ( + "net" + "strconv" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + . 
"github.com/yandex/pandora/components/phttp" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestImport(t *testing.T) { + ginkgoutil.RunSuite(t, "phttp Import Suite") +} + +var _ = Describe("import", func() { + It("not panics", func() { + Expect(func() { + Import(afero.NewOsFs()) + }).NotTo(Panic()) + }) +}) + +var _ = Describe("preResolveTargetAddr", func() { + It("host target", func() { + conf := &ClientConfig{} + conf.Dialer.DNSCache = true + + listener, err := net.ListenTCP("tcp4", nil) + defer listener.Close() + Expect(err).NotTo(HaveOccurred()) + + port := strconv.Itoa(listener.Addr().(*net.TCPAddr).Port) + target := "localhost:" + port + expectedResolved := "127.0.0.1:" + port + + err = preResolveTargetAddr(conf, &target) + Expect(err).NotTo(HaveOccurred()) + Expect(conf.Dialer.DNSCache).To(BeFalse()) + + Expect(target).To(Equal(expectedResolved)) + }) + + It("ip target", func() { + conf := &ClientConfig{} + conf.Dialer.DNSCache = true + + const addr = "127.0.0.1:80" + target := addr + err := preResolveTargetAddr(conf, &target) + Expect(err).NotTo(HaveOccurred()) + Expect(conf.Dialer.DNSCache).To(BeFalse()) + Expect(target).To(Equal(addr)) + }) + + It("failed", func() { + conf := &ClientConfig{} + conf.Dialer.DNSCache = true + + const addr = "localhost:54321" + target := addr + err := preResolveTargetAddr(conf, &target) + Expect(err).To(HaveOccurred()) + Expect(conf.Dialer.DNSCache).To(BeTrue()) + Expect(target).To(Equal(addr)) + }) + +}) diff --git a/components/phttp/mock_client_test.go b/components/phttp/mock_client_test.go new file mode 100644 index 000000000..a56da548a --- /dev/null +++ b/components/phttp/mock_client_test.go @@ -0,0 +1,38 @@ +// Code generated by mockery v1.0.0 +package phttp + +import "net/http" +import "github.com/stretchr/testify/mock" + +// MockClient is an autogenerated mock type for the Client type +type MockClient struct { + mock.Mock +} + +// CloseIdleConnections provides a mock function with given fields: +func (_m *MockClient) CloseIdleConnections() { + _m.Called() +} + +// Do provides a mock function with given fields: req +func (_m *MockClient) Do(req *http.Request) (*http.Response, error) { + ret := _m.Called(req) + + var r0 *http.Response + if rf, ok := ret.Get(0).(func(*http.Request) *http.Response); ok { + r0 = rf(req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*http.Response) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*http.Request) error); ok { + r1 = rf(req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/components/phttp/mocks/ammo.go b/components/phttp/mocks/ammo.go new file mode 100644 index 000000000..533d7fccc --- /dev/null +++ b/components/phttp/mocks/ammo.go @@ -0,0 +1,50 @@ +// Code generated by mockery v1.0.0 +package ammomock + +import http "net/http" +import mock "github.com/stretchr/testify/mock" +import netsample "github.com/yandex/pandora/core/aggregator/netsample" + +// Ammo is an autogenerated mock type for the Ammo type +type Ammo struct { + mock.Mock +} + +// Id provides a mock function with given fields: +func (_m *Ammo) Id() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Request provides a mock function with given fields: +func (_m *Ammo) Request() (*http.Request, *netsample.Sample) { + ret := _m.Called() + + var r0 *http.Request + if rf, ok := ret.Get(0).(func() *http.Request); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*http.Request) + } + } + + var r1 *netsample.Sample + if rf, ok := ret.Get(1).(func() *netsample.Sample); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*netsample.Sample) + } + } + + return r0, r1 +} diff --git a/components/phttp/phttp_suite_test.go b/components/phttp/phttp_suite_test.go new file mode 100644 index 000000000..2dc671a62 --- /dev/null +++ b/components/phttp/phttp_suite_test.go @@ -0,0 +1,16 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package phttp + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestPhttp(t *testing.T) { + ginkgoutil.RunSuite(t, "HTTP Suite") +} diff --git a/components/phttp/testdata/ammo.jsonline b/components/phttp/testdata/ammo.jsonline new file mode 100644 index 000000000..76ddb5c5f --- /dev/null +++ b/components/phttp/testdata/ammo.jsonline @@ -0,0 +1,27 @@ +{"uri": "/00", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/01", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"tag": "hello", "uri": "/02", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/03", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/04", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/05", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/06", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/07", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"tag": "hello", "uri": "/08", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/09", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/10", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/11", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/12", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/13", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/14", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/15", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} 
+{"uri": "/16", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/17", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/18", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/19", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/20", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/21", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/22", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/23", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"tag": "hello", "uri": "/24", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} +{"uri": "/25", "method": "POST", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000", "body": "some_plaintext_body"} +{"uri": "/26", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000", "body": "{\"areaId\":10770}"} diff --git a/components/phttp/testdata/ammo.stpd b/components/phttp/testdata/ammo.stpd new file mode 100644 index 000000000..426d5f61a --- /dev/null +++ b/components/phttp/testdata/ammo.stpd @@ -0,0 +1,28 @@ +54 +GET / HTTP/1.1 +Host: www.ya.ru +Connection: close + + +58 +GET /test HTTP/1.1 +Host: www.ya.ru +Connection: close + + +59 tag +GET /test2 HTTP/1.1 +Host: www.ya.ru +Connection: close + + +84 tag +POST /test3 HTTP/1.1 +Host: www.ya.ru +Connection: close +Content-Length: 5 + +hello + + +0 diff --git a/gun/http/testdata/test.crt b/components/phttp/testdata/test.crt similarity index 100% rename from gun/http/testdata/test.crt rename to components/phttp/testdata/test.crt diff --git a/gun/http/testdata/test.key b/components/phttp/testdata/test.key similarity index 100% rename from gun/http/testdata/test.key rename to components/phttp/testdata/test.key diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 3308a65b3..000000000 --- a/config/config.go +++ /dev/null @@ -1,58 +0,0 @@ -package config - -import ( - "encoding/json" -) - -type Global struct { - Pools []UserPool -} - -type AmmoProvider struct { - AmmoType string - AmmoSource string // filename for ammo file (decoder based on ammo type) - AmmoLimit int // limit of ammos, default is 0 - infinite ammos - Passes int // number of passes, default is 0 - infinite passes -} - -type Gun struct { - GunType string - Parameters map[string]interface{} -} - -type ResultListener struct { - ListenerType string - Destination string -} - -type Limiter struct { - LimiterType string - Parameters map[string]interface{} -} - -type CompositeLimiter struct { - Steps []Limiter -} - -type User struct { - Name string - Gun *Gun - AmmoProvider AmmoProvider - ResultListener 
ResultListener - Limiter *Limiter -} - -type UserPool struct { - Name string - Gun *Gun - AmmoProvider *AmmoProvider - ResultListener *ResultListener - UserLimiter *Limiter - StartupLimiter *Limiter - SharedSchedule bool // wether or not will all Users from this pool have shared schedule -} - -func NewGlobalFromJSON(jsonDoc []byte) (gc Global, err error) { - err = json.Unmarshal(jsonDoc, &gc) - return -} diff --git a/config/config_test.go b/config/config_test.go deleted file mode 100644 index 94545ca64..000000000 --- a/config/config_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package config - -import ( - "encoding/json" - "log" - "testing" -) - -func TestGlobalConfig(t *testing.T) { - lc := &Limiter{ - LimiterType: "periodic", - Parameters: map[string]interface{}{ - "Period": 1.0, - "BatchSize": 3.0, - "MaxCount": 9.0, - }, - } - slc := &Limiter{ - LimiterType: "periodic", - Parameters: map[string]interface{}{ - "Period": 0.1, - "BatchSize": 2.0, - "MaxCount": 5.0, - }, - } - apc := &AmmoProvider{ - AmmoType: "jsonline/spdy", - AmmoSource: "./example/data/ammo.jsonline", - } - rlc := &ResultListener{ - ListenerType: "log/simple", - } - gc := &Gun{ - GunType: "spdy", - Parameters: map[string]interface{}{ - "Target": "localhost:3000", - "PingPeriod": "5", - }, - } - globalConfig := &Global{ - Pools: []UserPool{ - { - Name: "Pool#0", - Gun: gc, - AmmoProvider: apc, - ResultListener: rlc, - UserLimiter: lc, - StartupLimiter: slc, - }, - { - Name: "Pool#1", - Gun: gc, - AmmoProvider: apc, - ResultListener: rlc, - UserLimiter: lc, - StartupLimiter: slc, - }, - }, - } - jsonDoc, err := json.Marshal(globalConfig) - if err != nil { - t.Errorf("Could not marshal config to json: %s", err) - return - } - - log.Println(string(jsonDoc)) - - newgc, err := NewGlobalFromJSON(jsonDoc) - if err != nil { - t.Errorf("Could not unmarshal config from json: %s", err) - return - } - log.Printf("\n%#v\n", newgc) -} diff --git a/core/aggregator/discard.go b/core/aggregator/discard.go new file mode 100644 index 000000000..6dae8647f --- /dev/null +++ b/core/aggregator/discard.go @@ -0,0 +1,29 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" +) + +// NewDiscard returns Aggregator that just throws reported ammo away. +func NewDiscard() core.Aggregator { + return discard{} +} + +type discard struct{} + +func (discard) Run(ctx context.Context, _ core.AggregatorDeps) error { + <-ctx.Done() + return nil +} + +func (discard) Report(s core.Sample) { + coreutil.ReturnSampleIfBorrowed(s) +} diff --git a/core/aggregator/encoder.go b/core/aggregator/encoder.go new file mode 100644 index 000000000..0b0dca75b --- /dev/null +++ b/core/aggregator/encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + "io" + "time" + + "github.com/pkg/errors" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" + "github.com/yandex/pandora/lib/errutil" +) + +type NewSampleEncoder func(w io.Writer, onFlush func()) SampleEncoder + +//go:generate mockery -name=SampleEncoder -case=underscore -outpkg=aggregatemock + +// SampleEncoder is efficient, buffered encoder of samples. +// SampleEncoder MAY support only concrete type of sample. +// MAY also implement SampleEncodeCloser. +type SampleEncoder interface { + // SampleEncoder SHOULD panic, if passed sample type is not supported. + Encode(s core.Sample) error + // Flush flushes internal buffer to wrapped io.Writer. + Flush() error + // Optional. Close MUST be called, if io.Closer is implemented. + // io.Closer +} + +//go:generate mockery -name=SampleEncodeCloser -case=underscore -outpkg=aggregatemock + +// SampleEncoderCloser is SampleEncoder that REQUIRE Close call to finish encoding. +type SampleEncodeCloser interface { + SampleEncoder + io.Closer +} + +type EncoderAggregatorConfig struct { + Sink core.DataSink `config:"sink" validate:"required"` + BufferSize int `config:"buffer-size"` + FlushInterval time.Duration `config:"flush-interval"` + ReporterConfig ReporterConfig `config:",squash"` +} + +func DefaultEncoderAggregatorConfig() EncoderAggregatorConfig { + return EncoderAggregatorConfig{ + FlushInterval: time.Second, + ReporterConfig: DefaultReporterConfig(), + } +} + +// NewEncoderAggregator returns aggregator that use SampleEncoder to marshall samples to core.DataSink. +// Handles encoder flushing and sample dropping on queue overflow. +// putSample is optional func, that called on handled sample. Usually returns sample to pool. +func NewEncoderAggregator( + newEncoder NewSampleEncoder, + conf EncoderAggregatorConfig, +) core.Aggregator { + return &dataSinkAggregator{ + Reporter: *NewReporter(conf.ReporterConfig), + newEncoder: newEncoder, + conf: conf, + } +} + +type dataSinkAggregator struct { + Reporter + core.AggregatorDeps + + newEncoder NewSampleEncoder + conf EncoderAggregatorConfig +} + +func (a *dataSinkAggregator) Run(ctx context.Context, deps core.AggregatorDeps) (err error) { + a.AggregatorDeps = deps + + sink, err := a.conf.Sink.OpenSink() + if err != nil { + return + } + defer func() { + closeErr := sink.Close() + err = errutil.Join(err, closeErr) + err = errutil.Join(err, a.DroppedErr()) + }() + + var flushes int + encoder := a.newEncoder(sink, func() { + flushes++ + }) + defer func() { + if encoder, ok := encoder.(io.Closer); ok { + closeErr := encoder.Close() + err = errutil.Join(err, errors.WithMessage(closeErr, "encoder close failed")) + return + } + flushErr := encoder.Flush() + err = errutil.Join(err, errors.WithMessage(flushErr, "final flush failed")) + }() + + var flushTick <-chan time.Time + if a.conf.FlushInterval > 0 { + flushTicker := time.NewTicker(a.conf.FlushInterval) + flushTick = flushTicker.C + defer flushTicker.Stop() + } + + var previousFlushes int +HandleLoop: + for { + select { + case sample := <-a.Incomming: + err = a.handleSample(encoder, sample) + if err != nil { + return + } + case _ = <-flushTick: + if previousFlushes == flushes { + a.Log.Debug("Flushing") + err = encoder.Flush() + if err != nil { + return + } + } + previousFlushes = flushes + case <-ctx.Done(): + break HandleLoop // Still need to handle all queued samples. 
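`NewEncoderAggregator` only needs a `NewSampleEncoder` factory, so a custom output format reduces to implementing `Encode` and `Flush`. A minimal sketch (a hypothetical line-per-sample encoder; the `onFlush` callback is ignored because this encoder never flushes on its own):

```go
package encodersketch

import (
	"bufio"
	"fmt"
	"io"

	"github.com/yandex/pandora/core"
	"github.com/yandex/pandora/core/aggregator"
)

type lineEncoder struct{ buf *bufio.Writer }

// Encode writes one sample per line using its default formatting.
func (e *lineEncoder) Encode(s core.Sample) error {
	_, err := fmt.Fprintln(e.buf, s)
	return err
}

// Flush pushes buffered lines down to the sink writer.
func (e *lineEncoder) Flush() error { return e.buf.Flush() }

// NewLineEncoder matches the NewSampleEncoder signature, so it can be passed
// to NewEncoderAggregator together with an EncoderAggregatorConfig.
var NewLineEncoder aggregator.NewSampleEncoder = func(w io.Writer, _ func()) aggregator.SampleEncoder {
	return &lineEncoder{buf: bufio.NewWriter(w)}
}
```

The aggregator then takes care of queueing, periodic flushing, draining on shutdown, and dropped-sample accounting.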
+ } + } + + for { + select { + case sample := <-a.Incomming: + err = a.handleSample(encoder, sample) + if err != nil { + return + } + default: + return nil + } + } +} + +func (a *dataSinkAggregator) handleSample(enc SampleEncoder, sample core.Sample) error { + err := enc.Encode(sample) + if err != nil { + return errors.WithMessage(err, "sample encode failed") + } + coreutil.ReturnSampleIfBorrowed(sample) + return nil +} diff --git a/core/aggregator/encoder_test.go b/core/aggregator/encoder_test.go new file mode 100644 index 000000000..d9175627e --- /dev/null +++ b/core/aggregator/encoder_test.go @@ -0,0 +1,266 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + aggregatemock "github.com/yandex/pandora/core/aggregator/mocks" + coremock "github.com/yandex/pandora/core/mocks" + iomock "github.com/yandex/pandora/lib/ioutil2/mocks" + "github.com/yandex/pandora/lib/testutil" +) + +type EncoderAggregatorTester struct { + t testutil.TestingT + wc *iomock.WriteCloser + sink *coremock.DataSink + enc *aggregatemock.SampleEncoder + newEncoder NewSampleEncoder + conf EncoderAggregatorConfig + ctx context.Context + cancel context.CancelFunc + deps core.AggregatorDeps + + flushCB func() +} + +func (tr *EncoderAggregatorTester) Testee() core.Aggregator { + return NewEncoderAggregator(tr.newEncoder, tr.conf) +} + +func (tr *EncoderAggregatorTester) AssertExpectations() { + t := tr.t + tr.enc.AssertExpectations(t) + tr.wc.AssertExpectations(t) + tr.sink.AssertExpectations(t) +} + +func NewEncoderAggregatorTester(t testutil.TestingT) *EncoderAggregatorTester { + testutil.ReplaceGlobalLogger() + tr := &EncoderAggregatorTester{t: t} + tr.wc = &iomock.WriteCloser{} + tr.sink = &coremock.DataSink{} + tr.sink.On("OpenSink").Once().Return(tr.wc, nil) + tr.enc = &aggregatemock.SampleEncoder{} + + tr.newEncoder = func(w io.Writer, flushCB func()) SampleEncoder { + assert.Equal(t, tr.wc, w) + tr.flushCB = flushCB + return tr.enc + } + tr.conf = EncoderAggregatorConfig{ + Sink: tr.sink, + FlushInterval: time.Second, + ReporterConfig: ReporterConfig{100}, + } + tr.ctx, tr.cancel = context.WithCancel(context.Background()) + tr.deps = core.AggregatorDeps{zap.L()} + return tr +} + +func TestEncoderAggregator(t *testing.T) { + tr := NewEncoderAggregatorTester(t) + runErr := make(chan error, 1) + + testee := tr.Testee() + go func() { + runErr <- testee.Run(tr.ctx, tr.deps) + }() + + for i := 0; i < 10; i++ { + tr.enc.On("Encode", i).Once().Return(nil) + testee.Report(i) + } + + tr.enc.On("Flush").Once().Return(func() error { + tr.wc.On("Close").Once().Return(nil) + return nil + }) + + tr.cancel() + err := <-runErr + require.NoError(t, err) + + tr.AssertExpectations() + + assert.NotPanics(t, func() { + testee.Report(100) + }) +} + +func TestEncoderAggregator_HandleQueueBeforeFinish(t *testing.T) { + tr := NewEncoderAggregatorTester(t) + testee := tr.Testee() + + for i := 0; i < 10; i++ { + tr.enc.On("Encode", i).Once().Return(nil) + testee.Report(i) + } + tr.enc.On("Flush").Once().Return(func() error { + tr.wc.On("Close").Once().Return(nil) + return nil + }) + + 
tr.cancel() + err := testee.Run(tr.ctx, tr.deps) + require.NoError(t, err) + + tr.AssertExpectations() +} + +func TestEncoderAggregator_CloseSampleEncoder(t *testing.T) { + tr := NewEncoderAggregatorTester(t) + newWOCloseEncoder := tr.newEncoder + tr.newEncoder = func(w io.Writer, onFlush func()) SampleEncoder { + encoder := newWOCloseEncoder(w, onFlush).(*aggregatemock.SampleEncoder) + return MockSampleEncoderAddCloser{encoder} + } + testee := tr.Testee() + + tr.enc.On("Encode", 0).Once().Return(nil) + testee.Report(0) + + tr.enc.On("Close").Once().Return(func() error { + tr.wc.On("Close").Once().Return(nil) + return nil + }) + + tr.cancel() + err := testee.Run(tr.ctx, tr.deps) + require.NoError(t, err) + tr.AssertExpectations() +} + +func TestEncoderAggregator_EverythingFailed(t *testing.T) { + tr := NewEncoderAggregatorTester(t) + tr.conf.ReporterConfig.SampleQueueSize = 1 + testee := tr.Testee() + + var ( + encodeErr = fmt.Errorf("encode") + flushErr = fmt.Errorf("flush") + wcCloseErr = fmt.Errorf("wc close") + ) + tr.enc.On("Encode", 0).Once().Return(encodeErr) + testee.Report(0) + testee.Report(1) // Dropped + + tr.enc.On("Flush").Once().Return(func() error { + tr.wc.On("Close").Once().Return(wcCloseErr) + return flushErr + }) + + tr.cancel() + err := testee.Run(tr.ctx, tr.deps) + require.Error(t, err) + + wrappedErrors := err.(*multierror.Error).WrappedErrors() + var causes []error + for _, err := range wrappedErrors { + causes = append(causes, errors.Cause(err)) + } + expectedErrors := []error{encodeErr, flushErr, wcCloseErr, &SomeSamplesDropped{1}} + assert.Equal(t, expectedErrors, causes) + + tr.AssertExpectations() +} + +func TestEncoderAggregator_AutoFlush(t *testing.T) { + testutil.RunFlaky(t, func(t testutil.TestingT) { + tr := NewEncoderAggregatorTester(t) + const flushInterval = 20 * time.Millisecond + tr.conf.FlushInterval = flushInterval + testee := tr.Testee() + + var flushes int + const expectedAutoFlushes = 2 + time.AfterFunc(expectedAutoFlushes*flushInterval+flushInterval/2, tr.cancel) + tr.enc.On("Flush").Return(func() error { + flushes++ + return nil + }) + tr.wc.On("Close").Return(nil) + err := testee.Run(tr.ctx, tr.deps) + require.NoError(t, err) + + assert.Equal(t, expectedAutoFlushes+1, flushes, "Expeced + one for finish") + }) +} + +func TestEncoderAggregator_ManualFlush(t *testing.T) { + testutil.RunFlaky(t, func(t testutil.TestingT) { + tr := NewEncoderAggregatorTester(t) + const autoFlushInterval = 15 * time.Millisecond + tr.conf.FlushInterval = autoFlushInterval + testee := tr.Testee() + var ( + flushes int + ) + + tr.enc.On("Encode", mock.Anything).Return(func(core.Sample) error { + tr.flushCB() + return nil + }) + tr.enc.On("Flush").Return(func() error { + flushes++ + tr.flushCB() + return nil + }) + tr.wc.On("Close").Return(nil) + + time.AfterFunc(autoFlushInterval*3, tr.cancel) + + runErr := make(chan error) + go func() { + runErr <- testee.Run(tr.ctx, tr.deps) + }() + writeTicker := time.NewTicker(autoFlushInterval / 3) + defer writeTicker.Stop() + for { + select { + case _ = <-writeTicker.C: + testee.Report(0) + case <-tr.ctx.Done(): + return + } + } + err := <-runErr + require.NoError(t, err) + assert.Equal(t, 1, flushes) + tr.AssertExpectations() + }) +} + +type MockSampleEncoderAddCloser struct { + *aggregatemock.SampleEncoder +} + +// Close provides a mock function with given fields: +func (_m MockSampleEncoderAddCloser) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 
= ret.Error(0) + } + + return r0 +} diff --git a/core/aggregator/jsonlines.go b/core/aggregator/jsonlines.go new file mode 100644 index 000000000..2d4747c7d --- /dev/null +++ b/core/aggregator/jsonlines.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "bufio" + "io" + + jsoniter "github.com/json-iterator/go" + "github.com/yandex/pandora/lib/ioutil2" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/coreutil" +) + +type JSONLineAggregatorConfig struct { + EncoderAggregatorConfig `config:",squash"` + JSONLineEncoderConfig `config:",squash"` +} + +type JSONLineEncoderConfig struct { + JSONIterConfig `config:",squash"` + coreutil.BufferSizeConfig `config:",squash"` +} + +// JSONIterConfig is subset of jsoniter.Config that may be useful to configure. +type JSONIterConfig struct { + // MarshalFloatWith6Digits makes float marshalling faster. + MarshalFloatWith6Digits bool `config:"marshal-float-with-6-digits"` + // SortMapKeys useful, when sample contains map object, and you want to see them in same order. + SortMapKeys bool `config:"sort-map-keys"` +} + +func DefaultJSONLinesAggregatorConfig() JSONLineAggregatorConfig { + return JSONLineAggregatorConfig{ + EncoderAggregatorConfig: DefaultEncoderAggregatorConfig(), + } +} + +// Aggregates samples in JSON Lines format: each output line is a Valid JSON Value of one sample. +// See http://jsonlines.org/ for details. +func NewJSONLinesAggregator(conf JSONLineAggregatorConfig) core.Aggregator { + var newEncoder NewSampleEncoder = func(w io.Writer, onFlush func()) SampleEncoder { + w = ioutil2.NewCallbackWriter(w, onFlush) + return NewJSONEncoder(w, conf.JSONLineEncoderConfig) + } + return NewEncoderAggregator(newEncoder, conf.EncoderAggregatorConfig) +} + +func NewJSONEncoder(w io.Writer, conf JSONLineEncoderConfig) SampleEncoder { + var apiConfig jsoniter.Config + config.Map(&apiConfig, conf.JSONIterConfig) + api := apiConfig.Froze() + // NOTE(skipor): internal buffering is not working really. Don't know why + // OPTIMIZE(skipor): don't wrap into buffer, if already ioutil2.ByteWriter + buf := bufio.NewWriterSize(w, conf.BufferSizeOrDefault()) + stream := jsoniter.NewStream(api, buf, conf.BufferSizeOrDefault()) + return &jsonEncoder{stream, buf} +} + +type jsonEncoder struct { + *jsoniter.Stream + buf *bufio.Writer +} + +func (e *jsonEncoder) Encode(s core.Sample) error { + e.WriteVal(s) + e.WriteRaw("\n") + return e.Error +} + +func (e *jsonEncoder) Flush() error { + err := e.Stream.Flush() + e.buf.Flush() + return err +} diff --git a/core/aggregator/jsonlines_test.go b/core/aggregator/jsonlines_test.go new file mode 100644 index 000000000..ec8ae97eb --- /dev/null +++ b/core/aggregator/jsonlines_test.go @@ -0,0 +1,61 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/datasink" +) + +type jsonTestData struct { + String string `json:"string"` + Int int `json:"int"` +} + +func TestNewJSONLinesAggregator(t *testing.T) { + samples := []jsonTestData{ + {"A", 1}, + {"B", 2}, + {"C", 3}, + } + + conf := DefaultJSONLinesAggregatorConfig() + sink := &datasink.Buffer{} + conf.Sink = sink + testee := NewJSONLinesAggregator(conf) + ctx, cancel := context.WithCancel(context.Background()) + + runErr := make(chan error) + go func() { + runErr <- testee.Run(ctx, core.AggregatorDeps{zap.L()}) + }() + + for _, sample := range samples { + testee.Report(sample) + } + cancel() + err := <-runErr + require.NoError(t, err) + + for _, expected := range samples { + var actual jsonTestData + line, err := sink.ReadBytes('\n') + require.NoError(t, err) + err = json.Unmarshal(line, &actual) + require.NoError(t, err) + assert.Equal(t, expected, actual) + } + + assert.Zero(t, sink.Len()) +} diff --git a/core/aggregator/log.go b/core/aggregator/log.go new file mode 100644 index 000000000..cf40e5ca3 --- /dev/null +++ b/core/aggregator/log.go @@ -0,0 +1,53 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + + "go.uber.org/zap" + + "github.com/yandex/pandora/core" +) + +func NewLog() core.Aggregator { + return &logging{sink: make(chan core.Sample, 128)} +} + +type logging struct { + sink chan core.Sample + log *zap.SugaredLogger +} + +func (l *logging) Report(sample core.Sample) { + l.sink <- sample +} + +func (l *logging) Run(ctx context.Context, deps core.AggregatorDeps) error { + l.log = deps.Log.Sugar() +loop: + for { + select { + case sample := <-l.sink: + l.handle(sample) + case <-ctx.Done(): + break loop + } + } + for { + // Context is done, but we should read all data from sink. 
+ select { + case r := <-l.sink: + l.handle(r) + default: + return nil + } + } +} + +func (l *logging) handle(sample core.Sample) { + l.log.Info("Sample reported: ", sample) +} diff --git a/core/aggregator/mocks/sample_encode_closer.go b/core/aggregator/mocks/sample_encode_closer.go new file mode 100644 index 000000000..65735cf03 --- /dev/null +++ b/core/aggregator/mocks/sample_encode_closer.go @@ -0,0 +1,52 @@ +// Code generated by mockery v1.0.0 +package aggregatemock + +import core "github.com/yandex/pandora/core" +import mock "github.com/stretchr/testify/mock" + +// SampleEncodeCloser is an autogenerated mock type for the SampleEncodeCloser type +type SampleEncodeCloser struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *SampleEncodeCloser) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Encode provides a mock function with given fields: s +func (_m *SampleEncodeCloser) Encode(s core.Sample) error { + ret := _m.Called(s) + + var r0 error + if rf, ok := ret.Get(0).(func(core.Sample) error); ok { + r0 = rf(s) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Flush provides a mock function with given fields: +func (_m *SampleEncodeCloser) Flush() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/aggregator/mocks/sample_encoder.go b/core/aggregator/mocks/sample_encoder.go new file mode 100644 index 000000000..344213eb5 --- /dev/null +++ b/core/aggregator/mocks/sample_encoder.go @@ -0,0 +1,38 @@ +// Code generated by mockery v1.0.0 +package aggregatemock + +import core "github.com/yandex/pandora/core" +import mock "github.com/stretchr/testify/mock" + +// SampleEncoder is an autogenerated mock type for the SampleEncoder type +type SampleEncoder struct { + mock.Mock +} + +// Encode provides a mock function with given fields: s +func (_m *SampleEncoder) Encode(s core.Sample) error { + ret := _m.Called(s) + + var r0 error + if rf, ok := ret.Get(0).(func(core.Sample) error); ok { + r0 = rf(s) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Flush provides a mock function with given fields: +func (_m *SampleEncoder) Flush() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/aggregator/netsample/aggregator.go b/core/aggregator/netsample/aggregator.go new file mode 100644 index 000000000..5295892ee --- /dev/null +++ b/core/aggregator/netsample/aggregator.go @@ -0,0 +1,30 @@ +package netsample + +import ( + "context" + + "github.com/yandex/pandora/core" +) + +type Aggregator interface { + Run(ctx context.Context, deps core.AggregatorDeps) error + Report(sample *Sample) +} + +func WrapAggregator(a Aggregator) core.Aggregator { return &aggregatorWrapper{a} } + +func UnwrapAggregator(a core.Aggregator) Aggregator { + switch a := a.(type) { + case *aggregatorWrapper: + return a.Aggregator + } + return &aggregatorUnwrapper{a} +} + +type aggregatorWrapper struct{ Aggregator } + +func (a *aggregatorWrapper) Report(s core.Sample) { a.Aggregator.Report(s.(*Sample)) } + +type aggregatorUnwrapper struct{ core.Aggregator } + +func (a *aggregatorUnwrapper) Report(s *Sample) { a.Aggregator.Report(s) } diff --git a/core/aggregator/netsample/netsample_suite_test.go 
b/core/aggregator/netsample/netsample_suite_test.go new file mode 100644 index 000000000..a243696ba --- /dev/null +++ b/core/aggregator/netsample/netsample_suite_test.go @@ -0,0 +1,11 @@ +package netsample + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestNetsample(t *testing.T) { + ginkgoutil.RunSuite(t, "Netsample Suite") +} diff --git a/core/aggregator/netsample/phout.go b/core/aggregator/netsample/phout.go new file mode 100644 index 000000000..6be94a31b --- /dev/null +++ b/core/aggregator/netsample/phout.go @@ -0,0 +1,142 @@ +package netsample + +import ( + "bufio" + "context" + "io" + "os" + "strconv" + "time" + + "github.com/c2h5oh/datasize" + "github.com/pkg/errors" + "github.com/spf13/afero" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" +) + +type PhoutConfig struct { + Destination string // Destination file name + Id bool // Print ammo ids if true. + FlushTime time.Duration `config:"flush-time"` + SampleQueueSize int `config:"sample-queue-size"` + Buffer coreutil.BufferSizeConfig `config:",squash"` +} + +func DefaultPhoutConfig() PhoutConfig { + return PhoutConfig{ + FlushTime: time.Second, + SampleQueueSize: 256 * 1024, + Buffer: coreutil.BufferSizeConfig{ + BufferSize: 8 * datasize.MB, + }, + } +} + +func NewPhout(fs afero.Fs, conf PhoutConfig) (a Aggregator, err error) { + filename := conf.Destination + var file afero.File = os.Stdout + if filename != "" { + file, err = fs.Create(conf.Destination) + } + if err != nil { + err = errors.Wrap(err, "phout output file open failed") + return + } + a = &phoutAggregator{ + config: conf, + sink: make(chan *Sample, conf.SampleQueueSize), + writer: bufio.NewWriterSize(file, conf.Buffer.BufferSizeOrDefault()), + buf: make([]byte, 0, 1024), + file: file, + } + return +} + +type phoutAggregator struct { + config PhoutConfig + sink chan *Sample + writer *bufio.Writer + buf []byte + file io.Closer +} + +func (a *phoutAggregator) Report(s *Sample) { a.sink <- s } + +func (a *phoutAggregator) Run(ctx context.Context, _ core.AggregatorDeps) error { + shouldFlush := time.NewTicker(1 * time.Second) + defer func() { + a.writer.Flush() + a.file.Close() + shouldFlush.Stop() + }() +loop: + for { + select { + case r := <-a.sink: + if err := a.handle(r); err != nil { + return err + } + select { + case <-shouldFlush.C: + a.writer.Flush() + default: + } + case <-time.After(1 * time.Second): + a.writer.Flush() + case <-ctx.Done(): + // Context is done, but we should read all data from sink + for { + select { + case r := <-a.sink: + if err := a.handle(r); err != nil { + return err + } + default: + break loop + } + } + } + } + return nil +} + +func (a *phoutAggregator) handle(s *Sample) error { + a.buf = appendPhout(s, a.buf, a.config.Id) + a.buf = append(a.buf, '\n') + _, err := a.writer.Write(a.buf) + a.buf = a.buf[:0] + releaseSample(s) + return err +} + +const phoutDelimiter = '\t' + +func appendPhout(s *Sample, dst []byte, id bool) []byte { + dst = appendTimestamp(s.timeStamp, dst) + dst = append(dst, phoutDelimiter) + dst = append(dst, s.tags...) + if id { + dst = append(dst, '#') + dst = strconv.AppendInt(dst, int64(s.Id()), 10) + } + for _, v := range s.fields { + dst = append(dst, phoutDelimiter) + dst = strconv.AppendInt(dst, int64(v), 10) + } + return dst +} + +func appendTimestamp(ts time.Time, dst []byte) []byte { + // Append time stamp in phout format. Example: 1335524833.562 + // Algorithm: append milliseconds string, than insert dot in right place. 
+ dst = strconv.AppendInt(dst, ts.UnixNano()/1e6, 10) + dotIndex := len(dst) - 3 + dst = append(dst, 0) + for i := len(dst) - 1; i > dotIndex; i-- { + dst[i] = dst[i-1] + } + dst[dotIndex] = '.' + return dst +} diff --git a/core/aggregator/netsample/phout_test.go b/core/aggregator/netsample/phout_test.go new file mode 100644 index 000000000..67f1093e4 --- /dev/null +++ b/core/aggregator/netsample/phout_test.go @@ -0,0 +1,82 @@ +package netsample + +import ( + "context" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + "github.com/yandex/pandora/core" +) + +var _ = Describe("Phout", func() { + const fileName = "out.txt" + var ( + fs afero.Fs + conf PhoutConfig + testee Aggregator + ctx context.Context + cancel context.CancelFunc + runErr chan error + ) + getOutput := func() string { + data, err := afero.ReadFile(fs, fileName) + Expect(err).NotTo(HaveOccurred()) + return string(data) + } + + BeforeEach(func() { + fs = afero.NewMemMapFs() + conf = DefaultPhoutConfig() + conf.Destination = fileName + ctx, cancel = context.WithCancel(context.Background()) + }) + JustBeforeEach(func() { + var err error + testee, err = NewPhout(fs, conf) + Expect(err).NotTo(HaveOccurred()) + runErr = make(chan error) + go func() { + runErr <- testee.Run(ctx, core.AggregatorDeps{}) + }() + }) + It("no id by default", func() { + testee.Report(newTestSample()) + testee.Report(newTestSample()) + cancel() + Expect(<-runErr).NotTo(HaveOccurred()) + Expect(getOutput()).To(Equal(strings.Repeat(testSampleNoIdPhout+"\n", 2))) + }, 1) + Context("id option set", func() { + BeforeEach(func() { + conf.Id = true + }) + It("id printed", func() { + testee.Report(newTestSample()) + cancel() + Expect(<-runErr).NotTo(HaveOccurred()) + Expect(getOutput()).To(Equal(testSamplePhout + "\n")) + }, 1) + + }) + +}) + +const ( + testSamplePhout = "1484660999.002 tag1|tag2#42 333333 0 0 0 0 0 0 0 13 999" + testSampleNoIdPhout = "1484660999.002 tag1|tag2 333333 0 0 0 0 0 0 0 13 999" +) + +func newTestSample() *Sample { + s := &Sample{} + s.timeStamp = time.Unix(1484660999, 002*1000000) + s.SetId(42) + s.AddTag("tag1|tag2") + s.setDuration(keyRTTMicro, time.Second/3) + s.set(keyErrno, 13) + s.set(keyProtoCode, ProtoCodeError) + return s +} diff --git a/core/aggregator/netsample/sample.go b/core/aggregator/netsample/sample.go new file mode 100644 index 000000000..e74ed75f6 --- /dev/null +++ b/core/aggregator/netsample/sample.go @@ -0,0 +1,121 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package netsample + +import ( + "net" + "os" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" +) + +const ( + ProtoCodeError = 999 +) + +const ( + keyRTTMicro = iota + keyConnectMicro // TODO (skipor): set all for HTTP using httptrace and helper structs + keySendMicro + keyLatencyMicro + keyReceiveMicro + keyIntervalEventMicro // TODO: understand WTF is that mean and set it right. 
+ keyRequestBytes + keyResponseBytes + keyErrno + keyProtoCode + fieldsNum +) + +func Acquire(tag string) *Sample { + s := samplePool.Get().(*Sample) + *s = Sample{ + timeStamp: time.Now(), + tags: tag, + } + return s +} + +func releaseSample(s *Sample) { samplePool.Put(s) } + +var samplePool = &sync.Pool{New: func() interface{} { return &Sample{} }} + +type Sample struct { + timeStamp time.Time + tags string + id int + fields [fieldsNum]int + err error +} + +func (s *Sample) Tags() string { return s.tags } +func (s *Sample) AddTag(tag string) { + if s.tags == "" { + s.tags = tag + return + } + s.tags += "|" + tag +} + +func (s *Sample) Id() int { return s.id } +func (s *Sample) SetId(id int) { s.id = id } + +func (s *Sample) ProtoCode() int { return s.get(keyProtoCode) } +func (s *Sample) SetProtoCode(code int) { + s.set(keyProtoCode, code) + s.setRTT() +} + +func (s *Sample) Err() error { return s.err } +func (s *Sample) SetErr(err error) { + s.err = err + s.set(keyErrno, getErrno(err)) + s.setRTT() +} + +func (s *Sample) get(k int) int { return s.fields[k] } +func (s *Sample) set(k, v int) { s.fields[k] = v } +func (s *Sample) setDuration(k int, d time.Duration) { s.set(k, int(d.Nanoseconds()/1000)) } +func (s *Sample) setRTT() { + if s.get(keyRTTMicro) == 0 { + s.setDuration(keyRTTMicro, time.Since(s.timeStamp)) + } +} + +func (s *Sample) String() string { + return string(appendPhout(s, nil, true)) +} + +func getErrno(err error) int { + // stackerr.Error and etc. + type hasUnderlying interface { + Underlying() error + } + for { + typed, ok := err.(hasUnderlying) + if !ok { + break + } + err = typed.Underlying() + } + err = errors.Cause(err) + for { + switch typed := err.(type) { + case *net.OpError: + err = typed.Err + case *os.SyscallError: + err = typed.Err + case syscall.Errno: + return int(typed) + default: + // Legacy default. + return ProtoCodeError + } + } +} diff --git a/core/aggregator/netsample/sample_test.go b/core/aggregator/netsample/sample_test.go new file mode 100644 index 000000000..64948fba4 --- /dev/null +++ b/core/aggregator/netsample/sample_test.go @@ -0,0 +1,89 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package netsample + +import ( + "fmt" + "net" + "net/http" + "os" + "strconv" + "strings" + "syscall" + "testing" + "time" + + "github.com/facebookgo/stackerr" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestSampleBehaviour(t *testing.T) { + const tag = "test" + const tag2 = "test2" + const id = 42 + sample := Acquire(tag) + sample.AddTag(tag2) + sample.SetId(id) + const sleep = time.Millisecond + time.Sleep(sleep) + sample.SetErr(syscall.EINVAL) + rtt := time.Duration(sample.get(keyRTTMicro)) * time.Microsecond + assert.NotZero(t, rtt) + assert.True(t, rtt <= time.Since(sample.timeStamp), "expected: %v; actual: %v", rtt, time.Since(sample.timeStamp)) + assert.True(t, sleep <= rtt) + sample.SetProtoCode(http.StatusBadRequest) + expectedTimeStamp := fmt.Sprintf("%v.%3.f", + sample.timeStamp.Unix(), + float32((sample.timeStamp.UnixNano()/1e6)%1000)) + // 1484660999. 
2 -> 1484660999.002 + expectedTimeStamp = strings.Replace(expectedTimeStamp, " ", "0", -1) + + expected := fmt.Sprintf("%s\t%s|%s#%v\t%v\t0\t0\t0\t0\t0\t0\t0\t%v\t%v", + expectedTimeStamp, + tag, tag2, id, + sample.get(keyRTTMicro), + int(syscall.EINVAL), http.StatusBadRequest, + ) + assert.Equal(t, expected, sample.String()) +} + +func TestGetErrno(t *testing.T) { + var err error = syscall.EINVAL + err = &os.SyscallError{Err: err} + err = &net.OpError{Err: err} + err = errors.WithStack(err) + err = stackerr.Wrap(err) + assert.NotZero(t, getErrno(err)) + assert.Equal(t, int(syscall.EINVAL), getErrno(err)) +} + +// TODO (skipor): test getErrno on some real net error from stdlib. + +func BenchmarkAppendTimestamp(b *testing.B) { + dst := make([]byte, 0, 512) + ts := time.Now() + b.Run("DotInsert", func(b *testing.B) { + for i := 0; i < b.N; i++ { + appendTimestamp(ts, dst) + dst = dst[:0] + } + }) + b.Run("UseMod", func(b *testing.B) { + for i := 0; i < b.N; i++ { + dst = strconv.AppendInt(dst, ts.Unix(), 10) + dst = append(dst, '.') + dst = strconv.AppendInt(dst, (ts.UnixNano()/1e3)%1e3, 10) + dst = dst[:0] + } + }) + b.Run("NoDot", func(b *testing.B) { + for i := 0; i < b.N; i++ { + dst = strconv.AppendInt(dst, ts.UnixNano()/1e3, 10) + dst = dst[:0] + } + }) +} diff --git a/core/aggregator/netsample/test.go b/core/aggregator/netsample/test.go new file mode 100644 index 000000000..268bdc146 --- /dev/null +++ b/core/aggregator/netsample/test.go @@ -0,0 +1,27 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package netsample + +import ( + "context" + + "github.com/yandex/pandora/core" +) + +type TestAggregator struct { + Samples []*Sample +} + +var _ Aggregator = &TestAggregator{} + +func (t *TestAggregator) Run(ctx context.Context, _ core.AggregatorDeps) error { + <-ctx.Done() + return nil +} + +func (t *TestAggregator) Report(s *Sample) { + t.Samples = append(t.Samples, s) +} diff --git a/core/aggregator/reporter.go b/core/aggregator/reporter.go new file mode 100644 index 000000000..5a3cfed1b --- /dev/null +++ b/core/aggregator/reporter.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "fmt" + + "go.uber.org/atomic" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" +) + +type ReporterConfig struct { + // SampleQueueSize is number maximum number of unhandled samples. + // On queue overflow, samples are dropped. 
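+	// Editor's note: DefaultSampleQueueSize below is derived from the assumed
+	// peak rate and disk-write stall constants, i.e. room for two 0.5 s bursts
+	// at 128 * 1024 samples per second.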
+ SampleQueueSize int `config:"sample-queue-size" validate:"min=1"` +} + +const ( + samplesPerSecondUpperBound = 128 * 1024 + diskWriteLatencySecondUpperBound = 0.5 + samplesInQueueAfterDiskWriteUpperBound = samplesPerSecondUpperBound * diskWriteLatencySecondUpperBound + DefaultSampleQueueSize = 2 * samplesInQueueAfterDiskWriteUpperBound +) + +func DefaultReporterConfig() ReporterConfig { + return ReporterConfig{ + SampleQueueSize: DefaultSampleQueueSize, + } +} + +func NewReporter(conf ReporterConfig) *Reporter { + return &Reporter{ + Incomming: make(chan core.Sample, conf.SampleQueueSize), + } +} + +type Reporter struct { + Incomming chan core.Sample + samplesDropped atomic.Int64 + lastSampleDropWarn atomic.Int64 +} + +func (a *Reporter) DroppedErr() error { + dropped := a.samplesDropped.Load() + if dropped == 0 { + return nil + } + return &SomeSamplesDropped{dropped} +} + +func (a *Reporter) Report(s core.Sample) { + select { + case a.Incomming <- s: + default: + a.dropSample(s) + } +} + +func (a *Reporter) dropSample(s core.Sample) { + dropped := a.samplesDropped.Inc() + if dropped == 1 { + // AggregatorDeps may not be passed, because Run was not called. + zap.L().Warn("First sample is dropped. More information in Run error") + } + coreutil.ReturnSampleIfBorrowed(s) +} + +type SomeSamplesDropped struct { + Dropped int64 +} + +func (err *SomeSamplesDropped) Error() string { + return fmt.Sprintf("%v samples were dropped", err.Dropped) +} diff --git a/core/aggregator/reporter_test.go b/core/aggregator/reporter_test.go new file mode 100644 index 000000000..1857e7f7b --- /dev/null +++ b/core/aggregator/reporter_test.go @@ -0,0 +1,45 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package aggregator + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + + coremock "github.com/yandex/pandora/core/mocks" + "github.com/yandex/pandora/lib/testutil" +) + +func TestReporter_DroppedErr(t *testing.T) { + core, entries := observer.New(zap.DebugLevel) + zap.ReplaceGlobals(zap.New(core)) + defer testutil.ReplaceGlobalLogger() + reporter := NewReporter(ReporterConfig{1}) + reporter.Report(1) + + assert.Nil(t, reporter.DroppedErr()) + reporter.Report(2) + err := reporter.DroppedErr() + require.Error(t, err) + + assert.EqualValues(t, 1, err.(*SomeSamplesDropped).Dropped) + assert.Equal(t, 1, entries.Len()) +} + +func TestReporter_BorrowedSampleReturnedOnDrop(t *testing.T) { + reporter := NewReporter(ReporterConfig{1}) + + reporter.Report(1) + borrowed := &coremock.BorrowedSample{} + borrowed.On("Return").Once() + + reporter.Report(borrowed) + borrowed.AssertExpectations(t) +} diff --git a/core/aggregator/test.go b/core/aggregator/test.go new file mode 100644 index 000000000..a57b667f7 --- /dev/null +++ b/core/aggregator/test.go @@ -0,0 +1,42 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package aggregator + +import ( + "context" + "sync" + + "github.com/yandex/pandora/core" +) + +func NewTest() *Test { + return &Test{} +} + +type Test struct { + lock sync.Mutex + samples []core.Sample +} + +var _ core.Aggregator = (*Test)(nil) + +func (t *Test) Run(ctx context.Context, _ core.AggregatorDeps) error { + <-ctx.Done() + return nil +} + +func (t *Test) Report(s core.Sample) { + t.lock.Lock() + t.samples = append(t.samples, s) + t.lock.Unlock() +} + +func (t *Test) GetSamples() []core.Sample { + t.lock.Lock() + s := t.samples + t.lock.Unlock() + return s +} diff --git a/core/config/config.go b/core/config/config.go new file mode 100644 index 000000000..429cbbaf8 --- /dev/null +++ b/core/config/config.go @@ -0,0 +1,124 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package config + +import ( + "github.com/fatih/structs" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) + +const TagName = "config" + +// Decodes conf to result. Doesn't zero fields. +func Decode(conf interface{}, result interface{}) error { + decoder, err := mapstructure.NewDecoder(newDecoderConfig(result)) + if err != nil { + return errors.WithStack(err) + } + return errors.WithStack(decoder.Decode(conf)) +} + +func DecodeAndValidate(conf interface{}, result interface{}) error { + err := Decode(conf, result) + if err != nil { + return err + } + return Validate(result) +} + +// Map maps with overwrite fields from src to dst. +// if src filed have `map:""` tag, tag value will +// be used as dst field destination. +// src field destinations should be subset of dst fields. +// dst should be struct pointer. src should be struct or struct pointer. +// Example: you need to configure only some subset fields of struct Multi, +// in such case you can from this subset of fields struct Single, decode config +// into it, and map it on Multi. 
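+// Editor's illustration, mirroring TestMapFlat in config_test.go: given hypothetical
+//	type Multi struct{ A, B string }
+//	type Single struct{ B string }
+// Map(&multi, Single{B: "b"}) overwrites only multi.B and leaves multi.A untouched.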
+func Map(dst, src interface{}) { + conf := &mapstructure.DecoderConfig{ + ErrorUnused: true, + ZeroFields: true, + Result: dst, + } + d, err := mapstructure.NewDecoder(conf) + if err != nil { + panic(err) + } + s := structs.New(src) + s.TagName = "map" + err = d.Decode(s.Map()) + if err != nil { + panic(err) + } +} + +func newDecoderConfig(result interface{}) *mapstructure.DecoderConfig { + compileHooks() + return &mapstructure.DecoderConfig{ + DecodeHook: compiledHook, + ErrorUnused: true, + ZeroFields: false, + WeaklyTypedInput: false, + TagName: TagName, + Result: result, + } +} + +type TypeHook mapstructure.DecodeHookFuncType +type KindHook mapstructure.DecodeHookFuncKind + +// Returning value allow do `var _ = AddHookType(xxx)` +func AddTypeHook(hook TypeHook) (_ struct{}) { + addHook(hook) + return +} + +func AddKindHook(hook KindHook) (_ struct{}) { + addHook(hook) + return +} + +func DefaultHooks() []mapstructure.DecodeHookFunc { + return []mapstructure.DecodeHookFunc{ + DebugHook, + TextUnmarshallerHook, + mapstructure.StringToTimeDurationHookFunc(), + StringToURLHook, + StringToIPHook, + StringToDataSizeHook, + } +} + +func GetHooks() []mapstructure.DecodeHookFunc { + return hooks +} +func SetHooks(h []mapstructure.DecodeHookFunc) { + hooks = h + onHooksModify() +} + +var ( + hooks = DefaultHooks() + hooksNeedCompile = true + compiledHook mapstructure.DecodeHookFunc +) + +func addHook(hook mapstructure.DecodeHookFunc) { + hooks = append(hooks, hook) + onHooksModify() +} + +func onHooksModify() { + hooksNeedCompile = true +} + +func compileHooks() { + if hooksNeedCompile { + compiledHook = mapstructure.ComposeDecodeHookFunc(hooks...) + hooksNeedCompile = false + } +} diff --git a/core/config/config_test.go b/core/config/config_test.go new file mode 100644 index 000000000..a602c6bc1 --- /dev/null +++ b/core/config/config_test.go @@ -0,0 +1,233 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package config + +import ( + "testing" + "time" + + "github.com/facebookgo/stack" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type M map[string]interface{} + +type IPStruct struct { + Val string `validate:"ip"` +} + +func TestDecodeValidate(t *testing.T) { + var data IPStruct + err := DecodeAndValidate(M{"val": "192.300.200.100"}, &data) + assert.Error(t, err) +} + +type NoTagStruct struct { + Val string +} + +const testVal = "test" + +func TestNilInputIsEmptyInput(t *testing.T) { + require.NotPanics(t, func() { + var data NoTagStruct + err := Decode(nil, &data) + assert.NoError(t, err) + }) +} + +func TestNilResultNotPanic(t *testing.T) { + require.NotPanics(t, func() { + err := Decode(M{"val": testVal}, (*NoTagStruct)(nil)) + assert.Error(t, err) + }) +} + +func TestFieldNameDecode(t *testing.T) { + var data NoTagStruct + err := Decode(M{"val": testVal}, &data) + require.NoError(t, err) + assert.Equal(t, testVal, data.Val) +} + +type TagStruct struct { + Val string `config:"valAlias"` +} + +func TestTagDecode(t *testing.T) { + var data TagStruct + err := Decode(M{"ValAlias": testVal}, &data) + require.NoError(t, err) + assert.Equal(t, testVal, data.Val) +} + +func TestErrorUnused(t *testing.T) { + var data NoTagStruct + err := Decode(M{"val": testVal, "unused": testVal}, &data) + require.Error(t, err) + + err = Decode(M{"vval": testVal}, &data) + assert.Error(t, err) +} + +func TestNoWeakTypedInput(t *testing.T) { + var data NoTagStruct + err := Decode(M{"val": 123}, &data) + assert.Error(t, err) +} + +type TimeoutStruct struct { + Timeout time.Duration `validate:"min-time=1s,max-time=20m"` +} + +func TestValidDurationDecode(t *testing.T) { + var data TimeoutStruct + expectedDuration := time.Second * 666 + + err := DecodeAndValidate(M{"timeout": "666s"}, &data) + require.NoError(t, err) + assert.Equal(t, expectedDuration, data.Timeout) +} + +func TestInvalidDurationError(t *testing.T) { + var data TimeoutStruct + + invalidTimeouts := []string{"ssss", "1ss", "1", "1s1", "1.s", "0x50"} + for _, invalid := range invalidTimeouts { + err := DecodeAndValidate(M{"timeout": "ssss"}, &data) + assert.Error(t, err, "invalid case: ", invalid) + } +} + +type Level1 struct { + Val1 string + Val2 string +} + +type Level2 struct { + Val1 Level1 + Val2 Level1 +} + +func TestNestedStructDecode(t *testing.T) { + const ( + iniVal1 = "val1" + iniVal2 = "val2" + newVal = "newVal" + ) + l2 := Level2{ + Level1{ + iniVal1, + iniVal2, + }, + Level1{ + iniVal1, + iniVal2, + }, + } + + err := DecodeAndValidate(M{ + "val1": M{"val1": newVal}, + "val2": M{"val1": ""}, + }, &l2) + require.NoError(t, err) + assert.Equal(t, newVal, l2.Val1.Val1) + assert.Equal(t, iniVal2, l2.Val1.Val2, "one field in intput, but entire struct rewrited") + assert.Equal(t, "", l2.Val2.Val1, "zero value not override default") +} + +type MultiStrings struct { + A string + B string +} + +type SingleString struct { + B string +} + +func TestMapFlat(t *testing.T) { + a := &MultiStrings{} + Map(a, &SingleString{B: "b"}) + assert.Equal(t, &MultiStrings{B: "b"}, a) + + a = &MultiStrings{A: "a", B: "not b"} + Map(a, &SingleString{B: "b"}) + assert.Equal(t, &MultiStrings{A: "a", B: "b"}, a) +} + +func TestMapRecursive(t *testing.T) { + type N struct { + MultiStrings + A string + } + type M struct { + MultiStrings + } + n := &N{MultiStrings: MultiStrings{B: "b"}, A: "a"} + Map(n, &M{MultiStrings: MultiStrings{A: "a"}}) + assert.Equal(t, &N{A: "a", MultiStrings: MultiStrings{A: 
"a"}}, n) +} + +func TestMapTagged(t *testing.T) { + type N struct { + MultiStrings + A string + } + type M struct { + SomeOtherFieldName MultiStrings `map:"MultiStrings"` + } + n := &N{MultiStrings: MultiStrings{B: "b"}, A: "a"} + Map(n, &M{SomeOtherFieldName: MultiStrings{A: "a"}}) + assert.Equal(t, &N{A: "a", MultiStrings: MultiStrings{A: "a"}}, n) +} + +func TestDeltaUpdate(t *testing.T) { + var l2 Level2 + err := Decode(M{ + "val1": M{"val1": "val1", "val2": "val2"}, + "val2": M{"val1": "val3"}, + }, &l2) + require.NoError(t, err) + assert.Equal(t, "val1", l2.Val1.Val1) + assert.Equal(t, "val2", l2.Val1.Val2) + assert.Equal(t, "val3", l2.Val2.Val1) + assert.Equal(t, "", l2.Val2.Val2) + + err = DecodeAndValidate(M{ + "val1": M{"val1": "val4"}, + "val2": M{"val2": "val5"}, + }, &l2) + require.NoError(t, err) + assert.Equal(t, "val4", l2.Val1.Val1) + assert.Equal(t, "val2", l2.Val1.Val2) + assert.Equal(t, "val3", l2.Val2.Val1) + assert.Equal(t, "val5", l2.Val2.Val2) +} + +func TestNextSquash(t *testing.T) { + data := &struct { + Level1 struct { + Level2 struct { + Foo string + } `config:",squash"` + } `config:",squash"` + }{} + + defer func() { + r := recover() + if r == nil { + return + } + t.Fatalf("panic: %s\n %s", r, stack.Callers(3)) + }() + + err := Decode(M{ + "foo": "baz", + }, &data) + require.NoError(t, err) + assert.Equal(t, "baz", data.Level1.Level2.Foo) +} diff --git a/core/config/doc.go b/core/config/doc.go new file mode 100644 index 000000000..70796cc3c --- /dev/null +++ b/core/config/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// Package config provides advanced framework to decode and validate +// configuration structs. +package config diff --git a/core/config/hooks.go b/core/config/hooks.go new file mode 100644 index 000000000..7e1238b87 --- /dev/null +++ b/core/config/hooks.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package config + +import ( + "encoding" + stderrors "errors" + "fmt" + "net" + "net/url" + "reflect" + + "github.com/asaskevich/govalidator" + "github.com/c2h5oh/datasize" + "github.com/facebookgo/stack" + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/lib/tag" +) + +var InvalidURLError = errors.New("string is not valid URL") + +var ( + urlPtrType = reflect.TypeOf(&url.URL{}) + urlType = reflect.TypeOf(url.URL{}) +) + +// StringToURLHook converts string to url.URL or *url.URL +func StringToURLHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != urlPtrType && t != urlType { + return data, nil + } + str := data.(string) + + if !govalidator.IsURL(str) { // checks more than url.Parse + return nil, errors.WithStack(InvalidURLError) + } + urlPtr, err := url.Parse(str) + if err != nil { + return nil, errors.WithStack(err) + } + + if t == urlType { + return *urlPtr, nil + } + return urlPtr, nil +} + +var InvalidIPError = stderrors.New("string is not valid IP") + +// StringToIPHook converts string to net.IP +func StringToIPHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + str := data.(string) + ip := net.ParseIP(str) + if ip == nil { + return nil, errors.WithStack(InvalidIPError) + } + return ip, nil +} + +// StringToDataSizeHook converts string to datasize.Single +func StringToDataSizeHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(datasize.B) { + return data, nil + } + var size datasize.ByteSize + err := size.UnmarshalText([]byte(data.(string))) + return size, err +} + +var textUnmarshallerType = func() reflect.Type { + var ptr *encoding.TextUnmarshaler + return reflect.TypeOf(ptr).Elem() +} + +func TextUnmarshallerHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t.Implements(textUnmarshallerType()) { + val := reflect.Zero(t) + if t.Kind() == reflect.Ptr { + val = reflect.New(t.Elem()) + } + err := unmarhsallText(val, data) + return val.Interface(), err + } + if reflect.PtrTo(t).Implements(textUnmarshallerType()) { + val := reflect.New(t) + err := unmarhsallText(val, data) + return val.Elem().Interface(), err + } + return data, nil +} + +func unmarhsallText(v reflect.Value, data interface{}) error { + unmarshaller := v.Interface().(encoding.TextUnmarshaler) + unmarshaller.UnmarshalText([]byte(data.(string))) + err := unmarshaller.UnmarshalText([]byte(data.(string))) + return err +} + +// DebugHook used to debug config decode. 
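+// Editor's note: the hook is a no-op unless lib/tag.Debug is set (see the early
+// return below); when enabled it logs each decode step together with its nesting depth.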
+func DebugHook(f reflect.Type, t reflect.Type, data interface{}) (p interface{}, err error) { + p, err = data, nil + if !tag.Debug { + return + } + callers := stack.Callers(2) + + var decodeCallers int + for _, caller := range callers { + if caller.Name == "(*Decoder).decode" { + decodeCallers++ + } + } + zap.L().Debug("Config decode", + zap.Int("depth", decodeCallers), + zap.Stringer("type", t), + zap.Stringer("from", f), + zap.String("data", fmt.Sprint(data)), + ) + return +} diff --git a/core/config/hooks_test.go b/core/config/hooks_test.go new file mode 100644 index 000000000..1694084c0 --- /dev/null +++ b/core/config/hooks_test.go @@ -0,0 +1,146 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package config + +import ( + "net" + "net/url" + "strconv" + "testing" + + "github.com/c2h5oh/datasize" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStringToURLPtrHook(t *testing.T) { + const ( + validURL = "http://yandex.ru" + invalidURL = "http://yandex.ru%%@#$%^&*()%%)(#U@%U)U)##(" + ) + var data struct { + URLPtr *url.URL `validate:"required"` + } + err := DecodeAndValidate(M{"urlptr": validURL}, &data) + require.NoError(t, err) + expectedURL, err := url.Parse(validURL) + require.NoError(t, err) + assert.Equal(t, data.URLPtr, expectedURL) + + err = DecodeAndValidate(M{"urlptr": invalidURL}, &data) + assert.Error(t, err) +} + +func TestStringToURLHook(t *testing.T) { + const ( + validURL = "http://yandex.ru" + invalidURL = "http://yandex.ru%%@#$%^&*()%%)(#U@%U)U)##(" + ) + var data struct { + URL url.URL `validate:"required,url"` + } + err := DecodeAndValidate(M{"url": validURL}, &data) + require.NoError(t, err) + expectedURL, err := url.Parse(validURL) + require.NoError(t, err) + assert.Equal(t, data.URL, *expectedURL) + + err = DecodeAndValidate(M{"url": invalidURL}, &data) + assert.Error(t, err) +} + +func TestStringToIPHook(t *testing.T) { + const ( + validIPv4 = "192.168.0.1" + validIPv6 = "FF80::1" + invalidIP = "that is not ip" + ) + var data struct { + IP net.IP `validate:"required"` + } + + err := DecodeAndValidate(M{"ip": validIPv4}, &data) + require.NoError(t, err) + expectedIP := net.ParseIP(validIPv4) + require.NoError(t, err) + assert.Equal(t, data.IP, expectedIP) + + err = DecodeAndValidate(M{"ip": validIPv6}, &data) + require.NoError(t, err) + expectedIP = net.ParseIP(validIPv6) + require.NoError(t, err) + assert.Equal(t, data.IP, expectedIP) + + err = DecodeAndValidate(M{"ip": invalidIP}, &data) + assert.Error(t, err) +} + +func TestStringToDataSizeHook(t *testing.T) { + var data struct { + Size datasize.ByteSize `validate:"min-size=128b"` + } + + err := Decode(M{"size": "0"}, &data) + assert.NoError(t, err) + assert.Error(t, Validate(data)) + assert.EqualValues(t, 0, data.Size) + + err = Decode(M{"size": "128"}, &data) + assert.NoError(t, err) + assert.NoError(t, Validate(data)) + assert.EqualValues(t, 128, data.Size) + + err = Decode(M{"size": "5mb"}, &data) + assert.NoError(t, err) + assert.EqualValues(t, 5*datasize.MB, data.Size) + + err = Decode(M{"size": "127KB"}, &data) + assert.NoError(t, err) + assert.EqualValues(t, 127*datasize.KB, data.Size) + + err = Decode(M{"size": "Bullshit"}, &data) + assert.Error(t, err) +} + +type ptrUnmarshaller int64 + +func (i *ptrUnmarshaller) UnmarshalText(text []byte) error { + val, err := strconv.ParseInt(string(text), 10, 64) + if err != nil 
{ + return err + } + *i = ptrUnmarshaller(val) + return nil +} + +type valueUnmarshaller struct{ Value *int64 } + +var valueUnmarshallerSink int64 + +func (v valueUnmarshaller) UnmarshalText(text []byte) error { + val, err := strconv.ParseInt(string(text), 10, 64) + if err != nil { + return err + } + valueUnmarshallerSink = val + return nil +} + +func TestTextUnmarshallerHookImplementsByValue(t *testing.T) { + var data struct { + Val valueUnmarshaller + } + data.Val.Value = new(int64) + + err := Decode(M{"val": "0"}, &data) + assert.NoError(t, err) + assert.EqualValues(t, 0, valueUnmarshallerSink) + + err = Decode(M{"val": "128"}, &data) + assert.NoError(t, err) + assert.EqualValues(t, 128, valueUnmarshallerSink) + +} diff --git a/core/config/validations.go b/core/config/validations.go new file mode 100644 index 000000000..18eaff0b1 --- /dev/null +++ b/core/config/validations.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package config + +import ( + "net" + "regexp" + "time" + + "github.com/asaskevich/govalidator" + "github.com/c2h5oh/datasize" + "gopkg.in/bluesuncorp/validator.v9" +) + +func MinTimeValidation(fl validator.FieldLevel) bool { + t, min, ok := getTimeForValidation(fl.Field().Interface(), fl.Param()) + return ok && min <= t +} +func MaxTimeValidation(fl validator.FieldLevel) bool { + t, max, ok := getTimeForValidation(fl.Field().Interface(), fl.Param()) + return ok && t <= max +} + +func getTimeForValidation(v interface{}, param string) (actual time.Duration, check time.Duration, ok bool) { + check, err := time.ParseDuration(param) + if err != nil { + return + } + actual, ok = v.(time.Duration) + return +} + +func MinSizeValidation(fl validator.FieldLevel) bool { + t, min, ok := getSizeForValidation(fl.Field().Interface(), fl.Param()) + return ok && min <= t +} +func MaxSizeValidation(fl validator.FieldLevel) bool { + t, max, ok := getSizeForValidation(fl.Field().Interface(), fl.Param()) + return ok && t <= max +} + +func getSizeForValidation(v interface{}, param string) (actual, check datasize.ByteSize, ok bool) { + err := check.UnmarshalText([]byte(param)) + if err != nil { + return + } + actual, ok = v.(datasize.ByteSize) + return +} + +// "host:port" or ":port" +func EndpointStringValidation(value string) bool { + host, port, err := net.SplitHostPort(value) + return err == nil && + (host == "" || govalidator.IsHost(host)) && + govalidator.IsPort(port) +} + +// pathRegexp is regexp for at least one path component. +// Valid characters are taken from RFC 3986. +var pathRegexp = regexp.MustCompile(`^(/[a-zA-Z0-9._~!$&'()*+,;=:@%-]+)+$`) + +func URLPathStringValidation(value string) bool { + return pathRegexp.MatchString(value) +} diff --git a/core/config/validations_test.go b/core/config/validations_test.go new file mode 100644 index 000000000..0d74acc89 --- /dev/null +++ b/core/config/validations_test.go @@ -0,0 +1,63 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package config + +import ( + "testing" + "time" + + "github.com/c2h5oh/datasize" + "github.com/stretchr/testify/assert" +) + +type WithDuration struct { + T time.Duration `validate:"min-time=10ms,max-time=100ms"` +} + +func TestValidateTimeDuration(t *testing.T) { + assert.NoError(t, Validate(&WithDuration{50 * time.Millisecond})) + + assert.Error(t, Validate(&WithDuration{5 * time.Millisecond})) + assert.Error(t, Validate(&WithDuration{500 * time.Millisecond})) +} + +type WithSize struct { + T datasize.ByteSize `validate:"min-size=10kb,max-size=10M"` +} + +func TestValidateByteSize(t *testing.T) { + assert.NoError(t, Validate(&WithSize{50 * datasize.KB})) + + assert.Error(t, Validate(&WithSize{5 * datasize.KB})) + assert.Error(t, Validate(&WithSize{500 * datasize.MB})) +} + +type WithEndpoint struct { + Endpoint string `validate:"required,endpoint"` +} + +func TestValidateEndpoint(t *testing.T) { + assert.NoError(t, Validate(&WithEndpoint{"192.168.0.1:9999"})) + assert.NoError(t, Validate(&WithEndpoint{"localhost:9999"})) + assert.NoError(t, Validate(&WithEndpoint{":9999"})) + + assert.Error(t, Validate(&WithEndpoint{"aaaaa"})) +} + +type WithURLPath struct { + Path string `validate:"url-path"` +} + +func TestValidatePath(t *testing.T) { + assert.NoError(t, Validate(&WithURLPath{"/some"})) + assert.NoError(t, Validate(&WithURLPath{"/just/some/path"})) + assert.NoError(t, Validate(&WithURLPath{"/just@/some()/+@!="})) + assert.Error(t, Validate(&WithURLPath{""})) + assert.Error(t, Validate(&WithURLPath{"/"})) + assert.Error(t, Validate(&WithURLPath{"/just/"})) + assert.Error(t, Validate(&WithURLPath{"/just//some"})) + assert.Error(t, Validate(&WithURLPath{"/just/some?"})) +} diff --git a/core/config/validator.go b/core/config/validator.go new file mode 100644 index 000000000..d1abf67a9 --- /dev/null +++ b/core/config/validator.go @@ -0,0 +1,85 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package config + +import ( + "github.com/pkg/errors" + "gopkg.in/bluesuncorp/validator.v9" +) + +var validations = []struct { + key string + val validator.Func +}{ + {"min-time", MinTimeValidation}, + {"max-time", MaxTimeValidation}, + {"min-size", MinSizeValidation}, + {"max-size", MaxSizeValidation}, +} + +var stringValidations = []struct { + key string + val StringValidation +}{ + {"endpoint", EndpointStringValidation}, + {"url-path", URLPathStringValidation}, +} + +var defaultValidator = newValidator() + +func Validate(value interface{}) error { + return errors.WithStack(defaultValidator.Struct(value)) +} + +func newValidator() *validator.Validate { + validate := validator.New() + validate.SetTagName("validate") + for _, val := range validations { + validate.RegisterValidation(val.key, val.val) + } + for _, val := range stringValidations { + validate.RegisterValidation(val.key, StringToAbstractValidation(val.val)) + } + return validate +} + +// RegisterCustom used to set custom validation check hooks on specific types, +// that will be called on such type validation, even if it is nested field. +func RegisterCustom(v CustomValidation, types ...interface{}) (_ struct{}) { + if len(types) < 1 { + panic("should be registered for at least one type") + } + defaultValidator.RegisterStructValidation(func(sl validator.StructLevel) { + v(structLevelHandle{sl}) + }, types...) + return +} + +type StringValidation func(value string) bool + +// StringToAbstractValidation wraps StringValidation into validator.Func. 
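+// Editor's illustration: this is how newValidator above registers the string
+// validations, e.g.
+//	validate.RegisterValidation("endpoint", StringToAbstractValidation(EndpointStringValidation))
+// which lets struct fields declare `validate:"endpoint"` as in the tests.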
+func StringToAbstractValidation(sv StringValidation) validator.Func { + return func(fl validator.FieldLevel) bool { + if strVal, ok := fl.Field().Interface().(string); ok { + return sv(strVal) + } + return false + } +} + +type ValidateHandle interface { + Value() interface{} + ReportError(field, reason string) +} + +type CustomValidation func(h ValidateHandle) + +type structLevelHandle struct{ validator.StructLevel } + +var _ ValidateHandle = structLevelHandle{} + +func (sl structLevelHandle) Value() interface{} { return sl.Current().Interface() } +func (sl structLevelHandle) ReportError(field, reason string) { + sl.StructLevel.ReportError(nil, field, "", reason, "") +} diff --git a/core/config/validator_test.go b/core/config/validator_test.go new file mode 100644 index 000000000..f2a7b0396 --- /dev/null +++ b/core/config/validator_test.go @@ -0,0 +1,91 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type WithURLString struct { + URL string `validate:"required,url"` +} + +func TestValidateURL(t *testing.T) { + require.NoError(t, Validate(&WithURLString{"http://yandex.ru/"})) + + err := Validate(&WithURLString{"http://yandex.ru/%zz"}) + require.Error(t, err) + + err = Validate(&WithURLString{}) + assert.Error(t, err) +} + +type Multi struct { + A int `validate:"min=1"` + B int `validate:"min=2"` +} + +type Single struct { + X int `validate:"max=0,min=10"` +} + +type Nested struct { + A Multi +} + +func TestValidateOK(t *testing.T) { + assert.NoError(t, Validate(&Multi{1, 2})) +} + +func TestValidateError(t *testing.T) { + err := Validate(&Multi{0, 2}) + require.Error(t, err) + + err = Validate(&Multi{0, 0}) + require.Error(t, err) + + err = Validate(&Single{5}) + assert.Error(t, err) +} + +func TestNestedError(t *testing.T) { + c := &Nested{ + Multi{0, 0}, + } + require.Error(t, Validate(c.A)) + err := Validate(c) + assert.Error(t, err) +} + +func TestValidateUnsupported(t *testing.T) { + err := Validate(1) + assert.Error(t, err) +} + +type D struct { + Val string `validate:"invalidNameXXXXXXX=1"` +} + +func TestValidateInvalidValidatorName(t *testing.T) { + require.Panics(t, func() { + Validate(&D{"test"}) + }) +} + +func TestCustom(t *testing.T) { + defer func() { + defaultValidator = newValidator() + }() + type custom struct{ fail bool } + RegisterCustom(func(h ValidateHandle) { + if h.Value().(custom).fail { + h.ReportError("fail", "should be false") + } + }, custom{}) + assert.NoError(t, Validate(&custom{fail: false})) + assert.Error(t, Validate(&custom{fail: true})) +} diff --git a/core/core.go b/core/core.go new file mode 100644 index 000000000..07164e034 --- /dev/null +++ b/core/core.go @@ -0,0 +1,233 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// package core defines pandora engine extension points. +// Core interfaces implementations MAY be used for custom engine creation and using as a library, +// or MAY be registered in pandora plugin system (look at core/plugin package), for creating engine +// from abstract config. 
+//
+// The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT",
+// "RECOMMENDED", "MAY", and "OPTIONAL" in this package doc are to be interpreted as described in
+// https://www.ietf.org/rfc/rfc2119.txt
+package core
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// Ammo is data required for one shot. It SHOULD contain something that differs
+// from one shot to another: something like the requested resource identifier, query params,
+// or meta information helpful for future shooting analysis.
+// Information common for all shoots SHOULD be passed via Provider configuration.
+type Ammo interface{}
+
+// ResettableAmmo is ammo that can be efficiently reset before reuse.
+// A generic Provider (a Provider that accepts an undefined type of Ammo) SHOULD Reset Ammo before
+// reuse if it implements ResettableAmmo.
+// Ammo that is not going to be used with generic Providers doesn't need to implement ResettableAmmo.
+type ResettableAmmo interface {
+	Ammo
+	Reset()
+}
+
+//go:generate mockery -name=Provider -case=underscore -outpkg=coremock
+
+// Provider is a routine that generates ammo for Instance shoots.
+// A Provider MUST be goroutine safe.
+type Provider interface {
+	// Run starts the provider routine of ammo generation.
+	// Blocks until ammo finishes, an error occurs, or the context is canceled.
+	// Run MUST be called only once. Run SHOULD be called before Acquire or Release calls, but
+	// MAY NOT because of goroutine races.
+	// In case of ctx cancel, SHOULD return nil, but MAY return ctx.Err(), or an error caused by
+	// ctx.Err() in terms of github.com/pkg/errors.Cause.
+	Run(ctx context.Context, deps ProviderDeps) error
+	// Acquire acquires ammo for a shoot. Acquire SHOULD be lightweight, so the Instance can Shoot
+	// as soon as possible. That means ammo format parsing SHOULD be done in the Provider Run
+	// goroutine, and Acquire just takes ammo from a ready queue.
+	// Returned ok == false means that shooting MUST be stopped because ammo is finished or
+	// shooting is canceled.
+	// Acquire MAY be called before Run, but SHOULD block until Run is called.
+	Acquire() (ammo Ammo, ok bool)
+	// Release notifies that ammo usage is finished, and it can be reused.
+	// Instance MUST NOT retain references to released ammo.
+	Release(ammo Ammo)
+}
+
+// ProviderDeps are passed to Provider in Run.
+// WARN: other fields could be added in future MINOR versions.
+// That is NOT considered a breaking compatibility change.
+type ProviderDeps struct {
+	Log *zap.Logger
+}
+
+//go:generate mockery -name=Gun -case=underscore -outpkg=coremock
+
+// Gun represents the logic of making shoots sequentially.
+// A Gun is owned by a single Instance that uses it for shooting in a cycle: Acquire Ammo from
+// Provider -> wait for the next shoot schedule event -> Shoot with Gun.
+// Guns that also implement io.Closer will be Closed after the Instance finishes.
+// Rule of thumb: Guns that create resources which SHOULD be closed after the Instance finishes
+// SHOULD implement io.Closer.
+// Example: a Gun that makes HTTP requests through a keep-alive connection SHOULD close it in Close.
+type Gun interface {
+	// Bind passes dependencies required for shooting. MUST be called once before any Shoot call.
+	Bind(aggr Aggregator, deps GunDeps) error
+	// Shoot makes one shoot. A shoot is some abstract load operation: a web service or database
+	// request, for example.
+	// During a shoot the Gun SHOULD Acquire one or more Samples and Report them to the bound Aggregator.
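+	// Editor's illustration only (not a normative part of this contract); a Shoot
+	// body that reports netsample Samples typically looks roughly like:
+	//	sample := netsample.Acquire(tag)  // tag is gun-specific
+	//	code, err := g.doRequest(ammo)    // doRequest is a hypothetical helper
+	//	if err != nil {
+	//		sample.SetErr(err)
+	//	} else {
+	//		sample.SetProtoCode(code)
+	//	}
+	//	g.aggr.Report(sample)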
+	// A Shoot error that MAY mean a failure of the service under load SHOULD be reported to the
+	// Aggregator in a sample and SHOULD be logged to deps.Log at zap.WarnLevel.
+	// For example, an HTTP request failure SHOULD be Reported and logged.
+	// In case of an error that SHOULD cancel shooting for all Instances, Shoot MUST panic using an
+	// error value describing the problem. That could be a configuration error, an unsupported Ammo
+	// type, or a situation when the service under load doesn't support the required protocol.
+	Shoot(ammo Ammo)
+
+	// io.Closer // OPTIONAL to implement. See Gun doc for details.
+}
+
+// GunDeps are passed to Gun before Instance Run.
+// WARN: other fields could be added in future MINOR versions.
+// That is NOT considered a breaking compatibility change.
+type GunDeps struct {
+	// Ctx is canceled on shoot cancel or finish.
+	Ctx context.Context
+	// Log fields already contain the Ids of the Pool and the Instance.
+	Log *zap.Logger
+	// Unique id of the Gun owning Instance. MAY be used for tagging Samples.
+	// Pool sets Instance ids from 0, incrementing after each Instance Run.
+	// There is a race between Instances for Ammo Acquire, so it's not guaranteed that an
+	// Instance with a lower InstanceId gets its Ammo earlier.
+	InstanceId int
+	// TODO(skipor): https://github.com/yandex/pandora/issues/71
+	// Pass parallelism value. InstanceId MUST be -1 if parallelism > 1.
+}
+
+// Sample is data containing a shoot report: return code, timings, shoot meta information.
+type Sample interface{}
+
+//go:generate mockery -name=BorrowedSample -case=underscore -outpkg=coremock
+
+// BorrowedSample is a Sample that was borrowed from a pool and SHOULD be returned by the
+// Aggregator after it has handled the Sample.
+type BorrowedSample interface {
+	Sample
+	Return()
+}
+
+//go:generate mockery -name=Aggregator -case=underscore -outpkg=coremock
+
+// Aggregator is a routine that aggregates Samples from all Pool Instances.
+// Usually an aggregator is a shooting result reporter that writes Reported Samples
+// to a DataSink in machine-readable format for future analysis.
+// An Aggregator MUST be goroutine safe.
+type Aggregator interface {
+	// Run starts the aggregator routine of handling Samples. Blocks until failure or context cancel.
+	// Run MUST be called only once. Run SHOULD be called before Report calls, but MAY NOT because
+	// of goroutine races.
+	// In case of ctx cancel, SHOULD return nil, but MAY return ctx.Err(), or an error caused by
+	// ctx.Err() in terms of github.com/pkg/errors.Cause.
+	// In case of any dropped Sample (unhandled because of Sample queue overflow) Run SHOULD return
+	// an error describing how many samples were dropped.
+	Run(ctx context.Context, deps AggregatorDeps) error
+	// Report reports a sample to the aggregator. SHOULD be lightweight and non-blocking,
+	// so the Instance can Shoot as soon as possible.
+	// That means Sample encoding and reporting SHOULD NOT be done in the caller goroutine,
+	// but SHOULD be done in the Aggregator Run goroutine.
+	// If the Aggregator can't handle a Reported Sample without blocking, it SHOULD just drop it.
+	// Reported Samples MAY just be dropped after context cancel.
+	// A Reported Sample MAY be reused for efficiency, so the caller MUST NOT retain a reference to
+	// the Sample.
+	// Report MAY be called before Aggregator Run. Report MAY be called after Run finishes, in case
+	// of Pool Run cancel.
+	// The Aggregator SHOULD Return the Sample if it implements BorrowedSample.
+	Report(s Sample)
+}
+
+// AggregatorDeps are passed to Aggregator in Run.
+// WARN: another fields could be added in next MINOR versions. +// That is NOT considered as a breaking compatibility change. +type AggregatorDeps struct { + Log *zap.Logger +} + +//go:generate mockery -name=Schedule -case=underscore -outpkg=coremock + +// Schedule represents operation schedule. Schedule MUST be goroutine safe. +type Schedule interface { + // Start starts schedule at passed time. + // Start SHOULD be called once, before any Next call. + // Start MUST NOT be called more than once or after Next call. + // If Start was not called, Schedule MUST be started on first Next call. + Start(startAt time.Time) + + // Next withdraw one operation token and returns next operation time and + // ok equal true, when Schedule is not finished. + // If there is no operation tokens left, Next returns Schedule finish time and ok equals false. + // If Next called first time and Start was not called, Schedule MUST start and return tx + // equal to start time. + // Returned ts values MUST increase monotonically. That is, ts returned on next Next call MUST + // be greater or equal than returned on previous. + Next() (ts time.Time, ok bool) + + // Left returns n >= 0 number operation token left, if it is known exactly. + // Returns n < 0, if number of operation tokens is unknown. + // Left MAY be called before Start. + Left() int +} + +//go:generate mockery -name=DataSource -case=underscore -outpkg=coremock + +// DataSource is abstract, ready to only open, source of data. +// Returned source MUST implement io.ReadCloser at least, but can implement more wide interface, +// and this interface methods MAY be used. For example, returned source can be afero.File, +// and can be seeked in such case. +// Examples: +// Dummy os.Stdin wrapper. +// File DataSource that contains filename and afero.Fs, and returns afero.File on OpenSource. +// HTTP DataSource that contains URL and headers used on OpenSource to download content to file, +// and return afero.File, that will be deleted on rc Close. +// String DataSource returns just wrapped *bytes.Buffer with string content. +type DataSource interface { + // OpenSource opens source for read. OpenSource MUST NOT be called more than once. + // Returned rc SHOULD have low latency and good enough throughput for Read. + // rc MAY be afero.File but SHOULD NOT be TCP connection for example. + // DataSource MAY be some remote resource, but OpenSource SHOULD download all necessary data to + // local temporary file and return it as rc. + // Rule of thumb: returned rc SHOULD be afero.File or wrapped *bytes.Buffer. + // Returned rc SHOULD cleanup all created temporary resources on Close. + // rc owner SHOULD NOT try cast it to concrete types. For example, rc can be + // wrapped temporary *os.File, that will be deleted on Close, so it can't be casted to *os.File, + // because has type of wrapper, but can be used as afero.File. + // rc Reads SHOULD be buffered for better performance if it doesn't implement io.ByteReader. + // That usually means that short Reads are efficient enough. It is implemented by *bytes.Buffer + // and *bufio.Reader, for example. rc Reads MAY be buffered regardless. + OpenSource() (rc io.ReadCloser, err error) +} + +//go:generate mockery -name=DataSink -case=underscore -outpkg=coremock + +// DataSink is abstract ready to open sink of data. +// +// Examples: +// Dummy os.Stdout wrapper. +// File DataSink that contains filename and afero.Fs, and returns afero.File on OpenSource. 
+// HTTP DataSink caches Written data to temporary file on wc Writes, +// and POST it using contained URL and headers on wc Close. +type DataSink interface { + // OpenSink opens sink for writing. OpenSink MUST NOT be called more than once. + // Returned wc SHOULD have low latency and good enough throughput for Write. + // wc MAY be afero.File but SHOULD NOT be TCP connection for example. + // DataSink MAY upload Wrote data somewhere but SHOULD do it on wc Close or in background + // goroutine. + // wc Writes SHOULD be buffered for better performance if it doesn't implement io.ByteWriter. + // That usually means that short Writes are efficient enough. It is implemented by *bytes.Buffer + // and *bufio.Writer, for example. wc Writes MAY be buffered regardless. + OpenSink() (wc io.WriteCloser, err error) +} diff --git a/core/coretest/config.go b/core/coretest/config.go new file mode 100644 index 000000000..33204b753 --- /dev/null +++ b/core/coretest/config.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coretest + +import ( + "github.com/onsi/gomega" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func Decode(data string, result interface{}) { + conf := ginkgoutil.ParseYAML(data) + err := config.Decode(conf, result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func DecodeAndValidate(data string, result interface{}) { + Decode(data, result) + err := config.Validate(result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} diff --git a/core/coretest/schedule.go b/core/coretest/schedule.go new file mode 100644 index 000000000..8af768391 --- /dev/null +++ b/core/coretest/schedule.go @@ -0,0 +1,57 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coretest + +import ( + "time" + + . "github.com/onsi/gomega" + "github.com/yandex/pandora/core" +) + +func ExpectScheduleNextsStartAt(sched core.Schedule, startAt time.Time, nexts ...time.Duration) { + beforeStartLeft := sched.Left() + tokensExpected := len(nexts) - 1 // Last next is finish time. + Expect(beforeStartLeft).To(Equal(tokensExpected)) + sched.Start(startAt) + actualNexts := DrainScheduleDuration(sched, startAt) + Expect(actualNexts).To(Equal(nexts)) +} + +func ExpectScheduleNexts(sched core.Schedule, nexts ...time.Duration) { + ExpectScheduleNextsStartAt(sched, time.Now(), nexts...) +} + +const drainLimit = 1000000 + +// DrainSchedule starts schedule and takes all tokens from it. +// Returns all tokens and finish time relative to start +func DrainScheduleDuration(sched core.Schedule, startAt time.Time) []time.Duration { + nexts := DrainSchedule(sched) + durations := make([]time.Duration, len(nexts)) + for i, next := range nexts { + durations[i] = next.Sub(startAt) + } + return durations +} + +// DrainSchedule takes all tokens from passed schedule. +// Returns all tokens and finish time. 
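+// Editor's note, usage sketch: for schedule.NewOnce(1) this returns two
+// timestamps - the single operation time and the finish time reported by the
+// final Next call with ok == false.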
+func DrainSchedule(sched core.Schedule) []time.Time { + expectedLeft := sched.Left() + var nexts []time.Time + for len(nexts) < drainLimit { + next, ok := sched.Next() + nexts = append(nexts, next) + if !ok { + Expect(sched.Left()).To(Equal(0)) + return nexts + } + expectedLeft-- + Expect(sched.Left()).To(Equal(expectedLeft)) + } + panic("drain limit reached") +} diff --git a/core/coretest/sink.go b/core/coretest/sink.go new file mode 100644 index 000000000..a0158263b --- /dev/null +++ b/core/coretest/sink.go @@ -0,0 +1,64 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coretest + +import ( + "io" + "io/ioutil" + "os" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/yandex/pandora/core" +) + +func AssertSinkEqualStdStream(t *testing.T, expectedPtr **os.File, getSink func() core.DataSink) { + temp, err := ioutil.TempFile("", "") + require.NoError(t, err) + + backup := *expectedPtr + defer func() { + *expectedPtr = backup + }() + *expectedPtr = temp + const testdata = "abcd" + + wc, err := getSink().OpenSink() + require.NoError(t, err) + + _, err = io.WriteString(wc, testdata) + require.NoError(t, err) + + err = wc.Close() + require.NoError(t, err) + + temp.Seek(0, io.SeekStart) + data, err := ioutil.ReadAll(temp) + assert.Equal(t, testdata, string(data)) +} + +func AssertSinkEqualFile(t *testing.T, fs afero.Fs, filename string, sink core.DataSink) { + afero.WriteFile(fs, filename, []byte("should be truncated"), 644) + + wc, err := sink.OpenSink() + require.NoError(t, err) + + const testdata = "abcd" + + _, err = io.WriteString(wc, testdata) + require.NoError(t, err) + + err = wc.Close() + require.NoError(t, err) + + data, err := afero.ReadFile(fs, filename) + require.NoError(t, err) + + assert.Equal(t, testdata, string(data)) +} diff --git a/core/coretest/source.go b/core/coretest/source.go new file mode 100644 index 000000000..23d4acb5c --- /dev/null +++ b/core/coretest/source.go @@ -0,0 +1,58 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package coretest + +import ( + "io" + "io/ioutil" + "os" + "testing" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/testutil" +) + +func AssertSourceEqualStdStream(t *testing.T, expectedPtr **os.File, getSource func() core.DataSource) { + temp, err := ioutil.TempFile("", "") + require.NoError(t, err) + + backup := *expectedPtr + defer func() { + *expectedPtr = backup + }() + *expectedPtr = temp + const testdata = "abcd" + _, err = io.WriteString(temp, testdata) + require.NoError(t, err) + + rc, err := getSource().OpenSource() + require.NoError(t, err) + + err = rc.Close() + require.NoError(t, err, "std stream should not be closed") + + temp.Seek(0, io.SeekStart) + data, err := ioutil.ReadAll(temp) + assert.Equal(t, testdata, string(data)) +} + +func AssertSourceEqualFile(t *testing.T, fs afero.Fs, filename string, source core.DataSource) { + const testdata = "abcd" + afero.WriteFile(fs, filename, []byte(testdata), 644) + + rc, err := source.OpenSource() + require.NoError(t, err) + + data := testutil.ReadString(t, rc) + err = rc.Close() + require.NoError(t, err) + + assert.Equal(t, testdata, data) +} diff --git a/core/coreutil/ammo.go b/core/coreutil/ammo.go new file mode 100644 index 000000000..a2ce8a667 --- /dev/null +++ b/core/coreutil/ammo.go @@ -0,0 +1,24 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "reflect" + + "github.com/yandex/pandora/core" +) + +// ResetReusedAmmo sets to zero any ammo. +// Used by core.Provider implementations that accepts generic type, and need to clean reused ammo +// before fill with fresh data. +func ResetReusedAmmo(ammo core.Ammo) { + if resettable, ok := ammo.(core.ResettableAmmo); ok { + resettable.Reset() + return + } + elem := reflect.ValueOf(ammo).Elem() + elem.Set(reflect.Zero(elem.Type())) +} diff --git a/core/coreutil/buffer_size_config.go b/core/coreutil/buffer_size_config.go new file mode 100644 index 000000000..a7fac9293 --- /dev/null +++ b/core/coreutil/buffer_size_config.go @@ -0,0 +1,30 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "github.com/c2h5oh/datasize" +) + +const DefaultBufferSize = 512 * 1024 +const MinimalBufferSize = 4 * 1024 + +// BufferSizeConfig SHOULD be used to configure buffer size. +// That makes buffer size configuration consistent among all Aggregators. +type BufferSizeConfig struct { + BufferSize datasize.ByteSize `config:"buffer-size"` +} + +func (conf BufferSizeConfig) BufferSizeOrDefault() int { + bufSize := int(conf.BufferSize) + if bufSize == 0 { + return DefaultBufferSize + } + if bufSize <= MinimalBufferSize { + return MinimalBufferSize + } + return bufSize +} diff --git a/core/coreutil/buffer_size_config_test.go b/core/coreutil/buffer_size_config_test.go new file mode 100644 index 000000000..13223be73 --- /dev/null +++ b/core/coreutil/buffer_size_config_test.go @@ -0,0 +1,23 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package coreutil + +import ( + "testing" + + "github.com/c2h5oh/datasize" + "github.com/magiconair/properties/assert" +) + +func TestBufferSizeConfig_BufferSizeOrDefault(t *testing.T) { + get := func(s datasize.ByteSize) int { + return BufferSizeConfig{s}.BufferSizeOrDefault() + } + assert.Equal(t, DefaultBufferSize, get(0)) + assert.Equal(t, MinimalBufferSize, get(1)) + const big = DefaultBufferSize * DefaultBufferSize + assert.Equal(t, big, get(big)) +} diff --git a/core/coreutil/data.go b/core/coreutil/data.go new file mode 100644 index 000000000..241e8ca31 --- /dev/null +++ b/core/coreutil/data.go @@ -0,0 +1,28 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "io" + + "github.com/yandex/pandora/core" +) + +type DataSinkFunc func() (wc io.WriteCloser, err error) + +func (f DataSinkFunc) OpenSink() (wc io.WriteCloser, err error) { + return f() +} + +var _ core.DataSink = DataSinkFunc(nil) + +type DataSourceFunc func() (wc io.ReadCloser, err error) + +func (f DataSourceFunc) OpenSource() (rc io.ReadCloser, err error) { + return f() +} + +var _ core.DataSource = DataSourceFunc(nil) diff --git a/core/coreutil/doc.go b/core/coreutil/doc.go new file mode 100644 index 000000000..6ee897e82 --- /dev/null +++ b/core/coreutil/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// package coreutil provides utilities for core interfaces, that can be useful +// not only for engine, but other core importers too. +// coreutil MUST NOT depend on any core subpackage, because they may import coreutil. +package coreutil diff --git a/core/coreutil/sample.go b/core/coreutil/sample.go new file mode 100644 index 000000000..5c56c5c1b --- /dev/null +++ b/core/coreutil/sample.go @@ -0,0 +1,16 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import "github.com/yandex/pandora/core" + +func ReturnSampleIfBorrowed(s core.Sample) { + borrowed, ok := s.(core.BorrowedSample) + if !ok { + return + } + borrowed.Return() +} diff --git a/core/coreutil/schedule.go b/core/coreutil/schedule.go new file mode 100644 index 000000000..f56537858 --- /dev/null +++ b/core/coreutil/schedule.go @@ -0,0 +1,46 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "sync" + "time" + + "github.com/yandex/pandora/core" +) + +// NewCallbackOnFinishSchedule returns schedule that calls back once onFinish +// just before first callee could know, that schedule is finished. +// That is, calls onFinish once, first time, whet Next() returns ok == false +// or Left() returns 0. 
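For illustration, a minimal sketch of wrapping a schedule with a finish callback, using the once schedule from core/schedule that the tests below rely on (error handling elided):

```go
package main

import (
	"fmt"
	"time"

	"github.com/yandex/pandora/core/coreutil"
	"github.com/yandex/pandora/core/schedule"
)

func main() {
	// Wrap a one-shot schedule so we are notified as soon as it is exhausted.
	sched := coreutil.NewCallbackOnFinishSchedule(schedule.NewOnce(1), func() {
		fmt.Println("schedule finished")
	})
	sched.Start(time.Now())
	for {
		if _, ok := sched.Next(); !ok {
			break // onFinish has fired exactly once by this point.
		}
	}
}
```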
+func NewCallbackOnFinishSchedule(s core.Schedule, onFinish func()) core.Schedule { + return &callbackOnFinishSchedule{ + Schedule: s, + onFinish: onFinish, + } +} + +type callbackOnFinishSchedule struct { + core.Schedule + onFinishOnce sync.Once + onFinish func() +} + +func (s *callbackOnFinishSchedule) Next() (ts time.Time, ok bool) { + ts, ok = s.Schedule.Next() + if !ok { + s.onFinishOnce.Do(s.onFinish) + } + return +} + +func (s *callbackOnFinishSchedule) Left() int { + left := s.Schedule.Left() + if left == 0 { + s.onFinishOnce.Do(s.onFinish) + } + return left +} diff --git a/core/coreutil/schedule_test.go b/core/coreutil/schedule_test.go new file mode 100644 index 000000000..93d84f3c5 --- /dev/null +++ b/core/coreutil/schedule_test.go @@ -0,0 +1,42 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/yandex/pandora/core/schedule" +) + +var _ = Describe("callback on finish schedule", func() { + It("callback once", func() { + var callbackTimes int + wrapped := schedule.NewOnce(1) + testee := NewCallbackOnFinishSchedule(wrapped, func() { + callbackTimes++ + }) + startAt := time.Now() + testee.Start(startAt) + tx, ok := testee.Next() + Expect(ok).To(BeTrue()) + Expect(tx).To(Equal(startAt)) + Expect(callbackTimes).To(Equal(0)) + + tx, ok = testee.Next() + Expect(ok).To(BeFalse()) + Expect(tx).To(Equal(startAt)) + Expect(callbackTimes).To(Equal(1)) + + tx, ok = testee.Next() + Expect(ok).To(BeFalse()) + Expect(tx).To(Equal(startAt)) + Expect(callbackTimes).To(Equal(1)) + }) + +}) diff --git a/core/coreutil/waiter.go b/core/coreutil/waiter.go new file mode 100644 index 000000000..dbebba4b4 --- /dev/null +++ b/core/coreutil/waiter.go @@ -0,0 +1,76 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "context" + "time" + + "github.com/yandex/pandora/core" +) + +// Waiter goroutine unsafe wrapper for efficient waiting schedule. +type Waiter struct { + sched core.Schedule + ctx context.Context + + // Lazy initialized. + timer *time.Timer + lastNow time.Time +} + +func NewWaiter(sched core.Schedule, ctx context.Context) *Waiter { + return &Waiter{sched: sched, ctx: ctx} +} + +// Wait waits for next waiter schedule event. +// Returns true, if event successfully waited, or false +// if waiter context is done, or schedule finished. +func (w *Waiter) Wait() (ok bool) { + // Check, that context is not done. Very quick: 5 ns for op, due to benchmark. + select { + case <-w.ctx.Done(): + return false + default: + } + next, ok := w.sched.Next() + if !ok { + return false + } + // Get current time lazily. + // For once schedule, for example, we need to get it only once. + if next.Before(w.lastNow) { + return true + } + w.lastNow = time.Now() + waitFor := next.Sub(w.lastNow) + if waitFor <= 0 { + return true + } + // Lazy init. We don't need timer for unlimited and once schedule. + if w.timer == nil { + w.timer = time.NewTimer(waitFor) + } else { + w.timer.Reset(waitFor) + } + select { + case _ = <-w.timer.C: + return true + case <-w.ctx.Done(): + return false + } +} + +// IsFinished is quick check, that wait context is not canceled and there are some tokens left in +// schedule. 
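A short usage sketch of the waiter loop, assuming the const schedule from core/schedule; this is the same wait-then-act pattern the engine instance uses for shooting:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/yandex/pandora/core/coreutil"
	"github.com/yandex/pandora/core/schedule"
)

func main() {
	// 10 events spread over one second; the loop exits when the schedule is
	// drained (or when the context is canceled, which never happens here).
	sched := schedule.NewConst(10, time.Second)
	sched.Start(time.Now())

	w := coreutil.NewWaiter(sched, context.Background())
	var shots int
	for w.Wait() {
		shots++
	}
	fmt.Println("shots:", shots)
}
```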
+func (w *Waiter) IsFinished() (ok bool) { + select { + case <-w.ctx.Done(): + return true + default: + return w.sched.Left() == 0 + } +} diff --git a/core/coreutil/waiter_test.go b/core/coreutil/waiter_test.go new file mode 100644 index 000000000..4cb0309e8 --- /dev/null +++ b/core/coreutil/waiter_test.go @@ -0,0 +1,71 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreutil + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/yandex/pandora/core/schedule" +) + +var _ = Describe("waiter", func() { + + It("unstarted", func() { + sched := schedule.NewOnce(1) + ctx := context.Background() + w := NewWaiter(sched, ctx) + var i int + for ; w.Wait(); i++ { + } + Expect(i).To(BeEquivalentTo(1)) + }) + + It("wait as expected", func() { + const ( + duration = 100 * time.Millisecond + ops = 100 + times = ops * duration / time.Second + ) + sched := schedule.NewConst(ops, duration) + ctx := context.Background() + w := NewWaiter(sched, ctx) + start := time.Now() + sched.Start(start) + var i int + for ; w.Wait(); i++ { + } + finish := time.Now() + Expect(i).To(BeEquivalentTo(times)) + Expect(finish.Sub(start)).To(BeNumerically(">=", duration*(times-1)/times)) + Expect(finish.Sub(start)).To(BeNumerically("<", 3*duration)) // Smaller interval will be more flaky. + }) + + It("context canceled before wait", func() { + sched := schedule.NewOnce(1) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + w := NewWaiter(sched, ctx) + Expect(w.Wait()).To(BeFalse()) + }) + + It("context canceled during wait", func() { + sched := schedule.NewConstConf(schedule.ConstConfig{Ops: 0.1, Duration: 100 * time.Second}) + timeout := 20 * time.Millisecond + start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + w := NewWaiter(sched, ctx) + Expect(w.Wait()).To(BeTrue()) // 0 + Expect(w.Wait()).To(BeFalse()) + Expect(time.Since(start)).To(BeNumerically(">", timeout)) + Expect(time.Since(start)).To(BeNumerically("<", 10*timeout)) + }) + +}) diff --git a/core/datasink/file.go b/core/datasink/file.go new file mode 100644 index 000000000..7ea859b48 --- /dev/null +++ b/core/datasink/file.go @@ -0,0 +1,50 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package datasink + +import ( + "io" + "os" + + "github.com/spf13/afero" + + "github.com/yandex/pandora/core" +) + +// TODO(skipor): gzip on flag + +type FileConfig struct { + Path string `config:"path" validate:"required"` +} + +func NewFile(fs afero.Fs, conf FileConfig) core.DataSink { + return &fileSink{afero.Afero{fs}, conf} +} + +type fileSink struct { + fs afero.Afero + conf FileConfig +} + +func (s *fileSink) OpenSink() (wc io.WriteCloser, err error) { + return s.fs.OpenFile(s.conf.Path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) +} + +func NewStdout() core.DataSink { + return hideCloseFileSink{os.Stdout} +} + +func NewStderr() core.DataSink { + return hideCloseFileSink{os.Stderr} +} + +type hideCloseFileSink struct{ afero.File } + +func (f hideCloseFileSink) OpenSink() (wc io.WriteCloser, err error) { + return f, nil +} + +func (f hideCloseFileSink) Close() error { return nil } diff --git a/core/datasink/file_test.go b/core/datasink/file_test.go new file mode 100644 index 000000000..018ba9720 --- /dev/null +++ b/core/datasink/file_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package datasink + +import ( + "os" + "testing" + + "github.com/spf13/afero" + + "github.com/yandex/pandora/core/coretest" +) + +func TestFileSink(t *testing.T) { + const filename = "/xxx/yyy" + fs := afero.NewMemMapFs() + sink := NewFile(fs, FileConfig{Path: filename}) + coretest.AssertSinkEqualFile(t, fs, filename, sink) +} + +func TestStdout(t *testing.T) { + coretest.AssertSinkEqualStdStream(t, &os.Stdout, NewStdout) +} + +func TestStderr(t *testing.T) { + coretest.AssertSinkEqualStdStream(t, &os.Stderr, NewStderr) +} diff --git a/core/datasink/std.go b/core/datasink/std.go new file mode 100644 index 000000000..cfae62f7d --- /dev/null +++ b/core/datasink/std.go @@ -0,0 +1,29 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package datasink + +import ( + "bytes" + "io" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/ioutil2" +) + +type Buffer struct { + bytes.Buffer + ioutil2.NopCloser +} + +var _ core.DataSink = &Buffer{} + +func (b *Buffer) OpenSink() (wc io.WriteCloser, err error) { + return b, nil +} + +func NewBuffer() *Buffer { + return &Buffer{} +} diff --git a/core/datasource/file.go b/core/datasource/file.go new file mode 100644 index 000000000..ab6f4ce1b --- /dev/null +++ b/core/datasource/file.go @@ -0,0 +1,46 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package datasource + +import ( + "io" + "os" + + "github.com/spf13/afero" + + "github.com/yandex/pandora/core" +) + +// TODO(skipor): auto unzip with option to turn this behaviour off. 
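For illustration, a minimal sketch of writing through the file sink defined above, using an in-memory afero filesystem and a made-up path so the snippet stays self-contained:

```go
package main

import (
	"io"

	"github.com/spf13/afero"

	"github.com/yandex/pandora/core/datasink"
)

func main() {
	// An in-memory fs keeps the sketch hermetic; a real run would pass afero.NewOsFs().
	fs := afero.NewMemMapFs()
	sink := datasink.NewFile(fs, datasink.FileConfig{Path: "/tmp/result.jsonl"})

	wc, err := sink.OpenSink()
	if err != nil {
		panic(err)
	}
	defer wc.Close()
	io.WriteString(wc, "sample data\n")
}
```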
+ +type FileConfig struct { + Path string `config:"path" validate:"required"` +} + +func NewFile(fs afero.Fs, conf FileConfig) core.DataSource { + return &fileSource{afero.Afero{fs}, conf} +} + +type fileSource struct { + fs afero.Afero + conf FileConfig +} + +func (s *fileSource) OpenSource() (wc io.ReadCloser, err error) { + return s.fs.Open(s.conf.Path) +} + +func NewStdin() core.DataSource { + return hideCloseFileSource{os.Stdin} +} + +type hideCloseFileSource struct{ afero.File } + +func (f hideCloseFileSource) OpenSource() (wc io.ReadCloser, err error) { + return f, nil +} + +func (f hideCloseFileSource) Close() error { return nil } diff --git a/core/datasource/file_test.go b/core/datasource/file_test.go new file mode 100644 index 000000000..714655a27 --- /dev/null +++ b/core/datasource/file_test.go @@ -0,0 +1,25 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package datasource + +import ( + "os" + "testing" + + "github.com/spf13/afero" + "github.com/yandex/pandora/core/coretest" +) + +func TestFileSource(t *testing.T) { + const filename = "/xxx/yyy" + fs := afero.NewMemMapFs() + source := NewFile(fs, FileConfig{Path: filename}) + coretest.AssertSourceEqualFile(t, fs, filename, source) +} + +func TestStdin(t *testing.T) { + coretest.AssertSourceEqualStdStream(t, &os.Stdout, NewStdin) +} diff --git a/core/datasource/std.go b/core/datasource/std.go new file mode 100644 index 000000000..a9f0aa222 --- /dev/null +++ b/core/datasource/std.go @@ -0,0 +1,78 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package datasource + +import ( + "bytes" + "io" + "io/ioutil" + "strings" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/ioutil2" +) + +func NewBuffer(buf *bytes.Buffer) core.DataSource { + return buffer{Buffer: buf} +} + +type buffer struct { + *bytes.Buffer + ioutil2.NopCloser +} + +func (b buffer) OpenSource() (wc io.ReadCloser, err error) { + return b, nil +} + +// NewReader returns dummy core.DataSource that returns it on OpenSource call, wrapping it +// ioutil.NopCloser if r is not io.Closer. +// NOTE(skipor): such wrapping hides Seek and other methods that can be used. +func NewReader(r io.Reader) core.DataSource { + return &readerSource{r} +} + +type readerSource struct { + source io.Reader +} + +func (r *readerSource) OpenSource() (rc io.ReadCloser, err error) { + if rc, ok := r.source.(io.ReadCloser); ok { + return rc, nil + } + // Need to add io.Closer, but don't want to hide seeker. + rs, ok := r.source.(io.ReadSeeker) + if ok { + return &struct { + io.ReadSeeker + ioutil2.NopCloser + }{ReadSeeker: rs}, nil + } + return ioutil.NopCloser(r.source), nil +} + +func NewString(s string) core.DataSource { + return &stringSource{Reader: strings.NewReader(s)} +} + +type stringSource struct { + *strings.Reader + ioutil2.NopCloser +} + +func (s stringSource) OpenSource() (rc io.ReadCloser, err error) { + return s, nil +} + +type InlineConfig struct { + Data string `validate:"required"` +} + +func NewInline(conf InlineConfig) core.DataSource { + return NewString(conf.Data) +} + +// TODO(skipor): InMemory DataSource, that reads all nested source data in open to buffer. 
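A minimal sketch of wrapping an arbitrary reader as a data source; NewString and NewBuffer follow the same open-then-close pattern:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/yandex/pandora/core/datasource"
)

func main() {
	// Any io.Reader can back a DataSource; NewReader adds a no-op Close when needed.
	src := datasource.NewReader(strings.NewReader("GET /ping HTTP/1.1\n"))

	rc, err := src.OpenSource()
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	data, _ := ioutil.ReadAll(rc)
	fmt.Print(string(data))
}
```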
diff --git a/core/engine/engine.go b/core/engine/engine.go new file mode 100644 index 000000000..2e2ef4a47 --- /dev/null +++ b/core/engine/engine.go @@ -0,0 +1,423 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package engine + +import ( + "context" + "fmt" + "sync" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" + "github.com/yandex/pandora/lib/errutil" + "github.com/yandex/pandora/lib/monitoring" +) + +type Config struct { + Pools []InstancePoolConfig `config:"pools" validate:"required,dive"` +} + +type InstancePoolConfig struct { + Id string + Provider core.Provider `config:"ammo" validate:"required"` + Aggregator core.Aggregator `config:"result" validate:"required"` + NewGun func() (core.Gun, error) `config:"gun" validate:"required"` + RPSPerInstance bool `config:"rps-per-instance"` + NewRPSSchedule func() (core.Schedule, error) `config:"rps" validate:"required"` + StartupSchedule core.Schedule `config:"startup" validate:"required"` +} + +// TODO(skipor): use something github.com/rcrowley/go-metrics based. +// Its high level primitives like Meter can be not fast enough, but EWMAs +// and Counters should good for that. +type Metrics struct { + Request *monitoring.Counter + Response *monitoring.Counter + InstanceStart *monitoring.Counter + InstanceFinish *monitoring.Counter +} + +func New(log *zap.Logger, m Metrics, conf Config) *Engine { + return &Engine{log: log, config: conf, metrics: m} +} + +type Engine struct { + log *zap.Logger + config Config + metrics Metrics + wait sync.WaitGroup +} + +// Run runs all instance pools. Run blocks until fail happen, or all pools +// subroutines are successfully finished. +// Ctx will be ancestor to Contexts passed to AmmoQueue, Gun and Aggregator. +// That's ctx cancel cancels shooting and it's Context values can be used for communication between plugins. +func (e *Engine) Run(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer func() { + e.log.Info("Engine finished") + cancel() + }() + + runRes := make(chan poolRunResult, 1) + for i, conf := range e.config.Pools { + if conf.Id == "" { + conf.Id = fmt.Sprintf("pool_%v", i) + } + e.wait.Add(1) + pool := newPool(e.log, e.metrics, e.wait.Done, conf) + go func() { + err := pool.Run(ctx) + select { + case runRes <- poolRunResult{pool.Id, err}: + case <-ctx.Done(): + pool.log.Info("Pool run result suppressed", + zap.String("id", pool.Id), zap.Error(err)) + } + }() + } + + for i := 0; i < len(e.config.Pools); i++ { + select { + case res := <-runRes: + e.log.Debug("Pool awaited", zap.Int("awaited", i), + zap.String("id", res.Id), zap.Error(res.Err)) + if res.Err != nil { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return errors.WithMessage(res.Err, fmt.Sprintf("%q pool run failed", res.Id)) + } + case <-ctx.Done(): + e.log.Info("Engine run canceled") + return ctx.Err() + } + } + return nil +} + +// Wait blocks until all run engine tasks are finished. +// Useful only in case of fail, because successful run awaits all started tasks. 
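For illustration, a rough sketch of wiring up and running the engine by hand, reusing the test provider and aggregator from this change plus a hypothetical no-op gun; a real setup would build this config through the plugin and config machinery instead:

```go
package main

import (
	"context"

	"go.uber.org/zap"

	"github.com/yandex/pandora/core"
	"github.com/yandex/pandora/core/aggregator"
	"github.com/yandex/pandora/core/engine"
	"github.com/yandex/pandora/core/provider"
	"github.com/yandex/pandora/core/schedule"
	"github.com/yandex/pandora/lib/monitoring"
)

// nopGun is a hypothetical stand-in gun that ignores ammo; a real gun shoots a target.
type nopGun struct{ aggr core.Aggregator }

func (g *nopGun) Bind(aggr core.Aggregator, _ core.GunDeps) error { g.aggr = aggr; return nil }
func (g *nopGun) Shoot(_ core.Ammo)                               {}

func main() {
	conf := engine.Config{Pools: []engine.InstancePoolConfig{{
		Id:         "example",
		Provider:   provider.NewNum(-1),   // Dummy ammo provider, as used in the engine tests.
		Aggregator: aggregator.NewTest(),  // Test aggregator from this change.
		NewGun:     func() (core.Gun, error) { return &nopGun{}, nil },
		NewRPSSchedule: func() (core.Schedule, error) {
			return schedule.NewOnce(1), nil
		},
		StartupSchedule: schedule.NewOnce(1),
	}}}
	metrics := engine.Metrics{
		Request:        &monitoring.Counter{},
		Response:       &monitoring.Counter{},
		InstanceStart:  &monitoring.Counter{},
		InstanceFinish: &monitoring.Counter{},
	}
	e := engine.New(zap.NewNop(), metrics, conf)
	if err := e.Run(context.Background()); err != nil {
		e.Wait() // Let started subroutines finish before exiting.
	}
}
```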
+func (e *Engine) Wait() { + e.wait.Wait() +} + +func newPool(log *zap.Logger, m Metrics, onWaitDone func(), conf InstancePoolConfig) *instancePool { + log = log.With(zap.String("pool", conf.Id)) + return &instancePool{log, m, onWaitDone, conf} +} + +type instancePool struct { + log *zap.Logger + metrics Metrics + onWaitDone func() + InstancePoolConfig +} + +// Run start instance pool. Run blocks until fail happen, or all instances finish. +// What's going on: +// AmmoQueue and Aggregator are started in separate goroutines. +// Instances create due to schedule is started in separate goroutine. +// Every new instance started in separate goroutine. +// When all instances are finished, Aggregator and AmmoQueue contexts are canceled, +// and their execution results are awaited. +// If error happen or Run context has been canceled, Run returns non-nil error immediately, +// remaining results awaiting goroutine in background, that will call onWaitDone callback, +// when all started subroutines will be finished. +func (p *instancePool) Run(ctx context.Context) error { + originalCtx := ctx // Canceled only in case of other pool fail. + p.log.Info("Pool run started") + ctx, cancel := context.WithCancel(ctx) + defer func() { + p.log.Info("Pool run finished") + cancel() + }() + + rh, err := p.runAsync(ctx) + if err != nil { + return err + } + + awaitErr := p.awaitRunAsync(rh) + + select { + case <-originalCtx.Done(): + p.log.Info("Pool execution canceled") + return ctx.Err() + case err, ok := <-awaitErr: + if ok { + p.log.Info("Pool failed. Canceling started tasks", zap.Error(err)) + return err + } + p.log.Info("Pool run finished successfully") + return nil + } +} + +type poolAsyncRunHandle struct { + runCtx context.Context + runCancel context.CancelFunc + instanceStartCtx context.Context + instanceStartCancel context.CancelFunc + + providerErr <-chan error + aggregatorErr <-chan error + startRes <-chan startResult + // Read only actually. But can be closed by reader, to be sure, that no result has been lost. + runRes chan instanceRunResult +} + +func (p *instancePool) runAsync(runCtx context.Context) (*poolAsyncRunHandle, error) { + // Canceled in case all instances finish, fail or run runCancel. + runCtx, runCancel := context.WithCancel(runCtx) + // Canceled also on out of ammo, and finish of shared RPS schedule. + instanceStartCtx, instanceStartCancel := context.WithCancel(runCtx) + newInstanceSchedule, err := p.buildNewInstanceSchedule(instanceStartCtx, instanceStartCancel) + if err != nil { + return nil, err + } + // Seems good enough. Even if some run will block on result send, it's not real problem. + const runResultBufSize = 64 + var ( + // All channels are buffered. All results should be read. 
+ providerErr = make(chan error, 1) + aggregatorErr = make(chan error, 1) + startRes = make(chan startResult, 1) + runRes = make(chan instanceRunResult, runResultBufSize) + ) + go func() { + deps := core.ProviderDeps{p.log} + providerErr <- p.Provider.Run(runCtx, deps) + }() + go func() { + deps := core.AggregatorDeps{p.log} + aggregatorErr <- p.Aggregator.Run(runCtx, deps) + }() + go func() { + started, err := p.startInstances(instanceStartCtx, runCtx, newInstanceSchedule, runRes) + startRes <- startResult{started, err} + }() + return &poolAsyncRunHandle{ + runCtx: runCtx, + runCancel: runCancel, + instanceStartCtx: instanceStartCtx, + instanceStartCancel: instanceStartCancel, + providerErr: providerErr, + aggregatorErr: aggregatorErr, + runRes: runRes, + startRes: startRes, + }, nil +} + +func (p *instancePool) awaitRunAsync(runHandle *poolAsyncRunHandle) <-chan error { + ah, awaitErr := p.newAwaitRunHandle(runHandle) + go func() { + defer func() { + ah.log.Debug("Pool wait finished") + close(ah.awaitErr) + if p.onWaitDone != nil { + p.onWaitDone() + } + }() + ah.awaitRun() + }() + return awaitErr +} + +type runAwaitHandle struct { + log *zap.Logger + poolAsyncRunHandle + awaitErr chan<- error + toWait int + startedInstances int + awaitedInstances int +} + +func (p *instancePool) newAwaitRunHandle(runHandle *poolAsyncRunHandle) (*runAwaitHandle, <-chan error) { + awaitErr := make(chan error) + const resultsToWait = 4 // AmmoQueue, Aggregator, instance start, instance run. + awaitHandle := &runAwaitHandle{ + log: p.log, + poolAsyncRunHandle: *runHandle, + awaitErr: awaitErr, + toWait: resultsToWait, + startedInstances: -1, // Undefined until start finish. + } + return awaitHandle, awaitErr +} + +func (ah *runAwaitHandle) awaitRun() { + for ah.toWait > 0 { + select { + case err := <-ah.providerErr: + ah.providerErr = nil + // TODO(skipor): not wait for provider, to return success result? + ah.toWait-- + ah.log.Debug("AmmoQueue awaited", zap.Error(err)) + if errutil.IsNotCtxError(ah.runCtx, err) { + ah.onErrAwaited(errors.WithMessage(err, "provider failed")) + } + if err == nil && !ah.isStartFinished() { + ah.log.Debug("Canceling instance start because out of ammo") + ah.instanceStartCancel() + } + case err := <-ah.aggregatorErr: + ah.aggregatorErr = nil + ah.toWait-- + ah.log.Debug("Aggregator awaited", zap.Error(err)) + if errutil.IsNotCtxError(ah.runCtx, err) { + ah.onErrAwaited(errors.WithMessage(err, "aggregator failed")) + } + case res := <-ah.startRes: + ah.startRes = nil + ah.toWait-- + ah.startedInstances = res.Started + ah.log.Debug("Instances start awaited", zap.Int("started", ah.startedInstances), zap.Error(res.Err)) + if errutil.IsNotCtxError(ah.instanceStartCtx, res.Err) { + ah.onErrAwaited(errors.WithMessage(res.Err, "instances start failed")) + } + ah.checkAllInstancesAreFinished() // There is a race between run and start results. 
+ case res := <-ah.runRes: + ah.awaitedInstances++ + if ent := ah.log.Check(zap.DebugLevel, "Instance run awaited"); ent != nil { + ent.Write(zap.Int("id", res.Id), zap.Int("awaited", ah.awaitedInstances), zap.Error(res.Err)) + } + if errutil.IsNotCtxError(ah.runCtx, res.Err) { + ah.onErrAwaited(errors.WithMessage(res.Err, fmt.Sprintf("instance %q run failed", res.Id))) + } + ah.checkAllInstancesAreFinished() + } + } +} + +func (ah *runAwaitHandle) onErrAwaited(err error) { + select { + case ah.awaitErr <- err: + case <-ah.runCtx.Done(): + if err != ah.runCtx.Err() { + ah.log.Debug("Error suppressed after run cancel", zap.Error(err)) + } + } +} + +func (ah *runAwaitHandle) checkAllInstancesAreFinished() { + allFinished := ah.isStartFinished() && ah.awaitedInstances >= ah.startedInstances + if !allFinished { + return + } + // Assert, that all run results are awaited. + close(ah.runRes) + res, ok := <-ah.runRes + if ok { + ah.log.Panic("Unexpected run result", zap.Any("res", res)) + } + + ah.runRes = nil + ah.toWait-- + ah.log.Info("All instances runs awaited.", zap.Int("awaited", ah.awaitedInstances)) + ah.runCancel() // Signal to provider and aggregator, that pool run is finished. + +} + +func (ah *runAwaitHandle) isStartFinished() bool { + return ah.startRes == nil +} + +func (p *instancePool) startInstances( + startCtx, runCtx context.Context, + newInstanceSchedule func() (core.Schedule, error), + runRes chan<- instanceRunResult) (started int, err error) { + deps := instanceDeps{ + p.Aggregator, + newInstanceSchedule, + p.NewGun, + instanceSharedDeps{p.Provider, p.metrics}, + } + + waiter := coreutil.NewWaiter(p.StartupSchedule, startCtx) + + // If create all instances asynchronously, and creation will fail, too many errors appears in log. + ok := waiter.Wait() + if !ok { + err = startCtx.Err() + return + } + firstInstance, err := newInstance(runCtx, p.log, 0, deps) + if err != nil { + return + } + started++ + go func() { + defer firstInstance.Close() + runRes <- instanceRunResult{0, firstInstance.Run(runCtx)} + }() + + for ; waiter.Wait(); started++ { + id := started + go func() { + runRes <- instanceRunResult{id, runNewInstance(runCtx, p.log, id, deps)} + }() + } + err = startCtx.Err() + return +} + +func (p *instancePool) buildNewInstanceSchedule(startCtx context.Context, cancelStart context.CancelFunc) ( + newInstanceSchedule func() (core.Schedule, error), err error, +) { + if p.RPSPerInstance { + newInstanceSchedule = p.NewRPSSchedule + return + } + var sharedRPSSchedule core.Schedule + sharedRPSSchedule, err = p.NewRPSSchedule() + if err != nil { + return + } + sharedRPSSchedule = coreutil.NewCallbackOnFinishSchedule(sharedRPSSchedule, func() { + select { + case <-startCtx.Done(): + p.log.Debug("RPS schedule has been finished") + return + default: + p.log.Info("RPS schedule has been finished. 
Canceling instance start.") + cancelStart() + } + }) + newInstanceSchedule = func() (core.Schedule, error) { + return sharedRPSSchedule, err + } + return +} + +func runNewInstance(ctx context.Context, log *zap.Logger, id int, deps instanceDeps) error { + instance, err := newInstance(ctx, log, id, deps) + if err != nil { + return err + } + defer instance.Close() + return instance.Run(ctx) +} + +type poolRunResult struct { + Id string + Err error +} + +type instanceRunResult struct { + Id int + Err error +} + +type startResult struct { + Started int + Err error +} diff --git a/core/engine/engine_suite_test.go b/core/engine/engine_suite_test.go new file mode 100644 index 000000000..752baf610 --- /dev/null +++ b/core/engine/engine_suite_test.go @@ -0,0 +1,21 @@ +package engine + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" + "github.com/yandex/pandora/lib/monitoring" +) + +func TestEngine(t *testing.T) { + ginkgoutil.RunSuite(t, "Engine Suite") +} + +func newTestMetrics() Metrics { + return Metrics{ + &monitoring.Counter{}, + &monitoring.Counter{}, + &monitoring.Counter{}, + &monitoring.Counter{}, + } +} diff --git a/core/engine/engine_test.go b/core/engine/engine_test.go new file mode 100644 index 000000000..187434156 --- /dev/null +++ b/core/engine/engine_test.go @@ -0,0 +1,377 @@ +package engine + +import ( + "context" + "sync" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "go.uber.org/atomic" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator" + "github.com/yandex/pandora/core/config" + coremock "github.com/yandex/pandora/core/mocks" + "github.com/yandex/pandora/core/provider" + "github.com/yandex/pandora/core/schedule" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +var _ = Describe("config validation", func() { + It("dive validation", func() { + conf := Config{ + Pools: []InstancePoolConfig{ + {}, + }, + } + err := config.Validate(conf) + Expect(err).To(HaveOccurred()) + }) + + It("pools required", func() { + conf := Config{} + err := config.Validate(conf) + Expect(err).To(HaveOccurred()) + }) + +}) + +func newTestPoolConf() (InstancePoolConfig, *coremock.Gun) { + gun := &coremock.Gun{} + gun.On("Bind", mock.Anything, mock.Anything).Return(nil) + gun.On("Shoot", mock.Anything) + conf := InstancePoolConfig{ + Provider: provider.NewNum(-1), + Aggregator: aggregator.NewTest(), + NewGun: func() (core.Gun, error) { + return gun, nil + }, + NewRPSSchedule: func() (core.Schedule, error) { + return schedule.NewOnce(1), nil + }, + StartupSchedule: schedule.NewOnce(1), + } + return conf, gun +} + +var _ = Describe("instance pool", func() { + var ( + gun *coremock.Gun + conf InstancePoolConfig + ctx context.Context + cancel context.CancelFunc + + waitDoneCalled atomic.Bool + onWaitDone func() + + p *instancePool + ) + + // Conf for starting only instance. 
+ BeforeEach(func() { + conf, gun = newTestPoolConf() + onWaitDone = func() { + old := waitDoneCalled.Swap(true) + if old { + panic("double on wait done call") + } + } + waitDoneCalled.Store(false) + ctx, cancel = context.WithCancel(context.Background()) + }) + + JustBeforeEach(func() { + metrics := newTestMetrics() + p = newPool(ginkgoutil.NewLogger(), metrics, onWaitDone, conf) + }) + + Context("shoot ok", func() { + It("", func() { + err := p.Run(ctx) + Expect(err).To(BeNil()) + ginkgoutil.AssertExpectations(gun) + Expect(waitDoneCalled.Load()).To(BeTrue()) + }, 1) + }) + + Context("context canceled", func() { + var ( + blockShoot sync.WaitGroup + ) + BeforeEach(func() { + blockShoot.Add(1) + prov := &coremock.Provider{} + prov.On("Run", mock.Anything, mock.Anything). + Return(func(startCtx context.Context, deps core.ProviderDeps) error { + <-startCtx.Done() + return nil + }) + prov.On("Acquire").Return(func() (core.Ammo, bool) { + cancel() + blockShoot.Wait() + return struct{}{}, true + }) + conf.Provider = prov + }) + It("", func() { + err := p.Run(ctx) + Expect(err).To(Equal(context.Canceled)) + ginkgoutil.AssertNotCalled(gun, "Shoot") + Expect(waitDoneCalled.Load()).To(BeFalse()) + blockShoot.Done() + Eventually(waitDoneCalled.Load).Should(BeTrue()) + }, 1) + }) + + Context("provider failed", func() { + var ( + failErr = errors.New("test err") + blockShootAndAggr sync.WaitGroup + ) + BeforeEach(func() { + blockShootAndAggr.Add(1) + prov := &coremock.Provider{} + prov.On("Run", mock.Anything, mock.Anything). + Return(func(context.Context, core.ProviderDeps) error { + return failErr + }) + prov.On("Acquire").Return(func() (core.Ammo, bool) { + blockShootAndAggr.Wait() + return nil, false + }) + conf.Provider = prov + aggr := &coremock.Aggregator{} + aggr.On("Run", mock.Anything, mock.Anything). 
+ Return(func(context.Context, core.AggregatorDeps) error { + blockShootAndAggr.Wait() + return nil + }) + conf.Aggregator = aggr + }) + It("", func() { + err := p.Run(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring(failErr.Error())) + ginkgoutil.AssertNotCalled(gun, "Shoot") + Consistently(waitDoneCalled.Load, 0.1).Should(BeFalse()) + blockShootAndAggr.Done() + Eventually(waitDoneCalled.Load).Should(BeTrue()) + }) + }) + + Context("aggregator failed", func() { + failErr := errors.New("test err") + BeforeEach(func() { + aggr := &coremock.Aggregator{} + aggr.On("Run", mock.Anything, mock.Anything).Return(failErr) + conf.Aggregator = aggr + }) + It("", func() { + err := p.Run(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring(failErr.Error())) + Eventually(waitDoneCalled.Load).Should(BeTrue()) + }, 1) + }) + + Context("start instances failed", func() { + failErr := errors.New("test err") + BeforeEach(func() { + conf.NewGun = func() (core.Gun, error) { + return nil, failErr + } + }) + It("", func() { + err := p.Run(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring(failErr.Error())) + Eventually(waitDoneCalled.Load).Should(BeTrue()) + }, 1) + }) + +}) + +var _ = Describe("multiple instance", func() { + It("out of ammo - instance start is canceled", func() { + conf, _ := newTestPoolConf() + conf.Provider = provider.NewNum(3) + conf.NewRPSSchedule = func() (core.Schedule, error) { + return schedule.NewUnlimited(time.Hour), nil + } + conf.StartupSchedule = schedule.NewComposite( + schedule.NewOnce(2), + schedule.NewConst(1, 5*time.Second), + ) + pool := newPool(ginkgoutil.NewLogger(), newTestMetrics(), nil, conf) + ctx := context.Background() + + err := pool.Run(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(pool.metrics.InstanceStart.Get()).To(BeNumerically("<=", 3)) + }, 1) + + It("out of RPS - instance start is canceled", func() { + conf, _ := newTestPoolConf() + conf.NewRPSSchedule = func() (core.Schedule, error) { + return schedule.NewOnce(5), nil + } + conf.StartupSchedule = schedule.NewComposite( + schedule.NewOnce(2), + schedule.NewConst(1, 2*time.Second), + ) + pool := newPool(ginkgoutil.NewLogger(), newTestMetrics(), nil, conf) + ctx := context.Background() + + err := pool.Run(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(pool.metrics.InstanceStart.Get()).To(BeNumerically("<=", 3)) + }) + +}) + +// TODO instance start canceled after out of ammo +// TODO instance start cancdled after RPS finish + +var _ = Describe("engine", func() { + var ( + gun1, gun2 *coremock.Gun + confs []InstancePoolConfig + ctx context.Context + cancel context.CancelFunc + engine *Engine + ) + BeforeEach(func() { + confs = make([]InstancePoolConfig, 2) + confs[0], gun1 = newTestPoolConf() + confs[1], gun2 = newTestPoolConf() + ctx, cancel = context.WithCancel(context.Background()) + }) + + JustBeforeEach(func() { + metrics := newTestMetrics() + engine = New(ginkgoutil.NewLogger(), metrics, Config{confs}) + }) + + Context("shoot ok", func() { + It("", func() { + err := engine.Run(ctx) + Expect(err).To(BeNil()) + ginkgoutil.AssertExpectations(gun1, gun2) + }) + }) + + Context("context canceled", func() { + // Cancel context on ammo acquire, an check that engine returns before + // instance finish. + var ( + blockPools sync.WaitGroup + ) + BeforeEach(func() { + blockPools.Add(1) + for i := range confs { + prov := &coremock.Provider{} + prov.On("Run", mock.Anything, mock.Anything). 
+ Return(func(startCtx context.Context, deps core.ProviderDeps) error { + <-startCtx.Done() + blockPools.Wait() + return nil + }) + prov.On("Acquire").Return(func() (core.Ammo, bool) { + cancel() + blockPools.Wait() + return struct{}{}, true + }) + confs[i].Provider = prov + } + }) + + It("", func() { + err := engine.Run(ctx) + Expect(err).To(Equal(context.Canceled)) + awaited := make(chan struct{}) + go func() { + defer close(awaited) + engine.Wait() + }() + Consistently(awaited, 0.1).ShouldNot(BeClosed()) + blockPools.Done() + Eventually(awaited).Should(BeClosed()) + }) + }) + + Context("one pool failed", func() { + var ( + failErr = errors.New("test err") + ) + BeforeEach(func() { + aggr := &coremock.Aggregator{} + aggr.On("Run", mock.Anything, mock.Anything).Return(failErr) + confs[0].Aggregator = aggr + }) + + It("", func() { + err := engine.Run(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring(failErr.Error())) + engine.Wait() + }, 1) + }) +}) + +var _ = Describe("build instance schedule", func() { + It("per instance schedule ", func() { + conf, _ := newTestPoolConf() + conf.RPSPerInstance = true + pool := newPool(ginkgoutil.NewLogger(), newTestMetrics(), nil, conf) + newInstanceSchedule, err := pool.buildNewInstanceSchedule(context.Background(), func() { + Fail("should not be called") + }) + Expect(err).NotTo(HaveOccurred()) + ginkgoutil.ExpectFuncsEqual(newInstanceSchedule, conf.NewRPSSchedule) + }) + + It("shared schedule create failed", func() { + conf, _ := newTestPoolConf() + scheduleCreateErr := errors.New("test err") + conf.NewRPSSchedule = func() (core.Schedule, error) { + return nil, scheduleCreateErr + } + pool := newPool(ginkgoutil.NewLogger(), newTestMetrics(), nil, conf) + newInstanceSchedule, err := pool.buildNewInstanceSchedule(context.Background(), func() { + Fail("should not be called") + }) + Expect(err).To(Equal(scheduleCreateErr)) + Expect(newInstanceSchedule).To(BeNil()) + }) + + It("shared schedule work", func() { + conf, _ := newTestPoolConf() + var newScheduleCalled bool + conf.NewRPSSchedule = func() (core.Schedule, error) { + Expect(newScheduleCalled).To(BeFalse()) + newScheduleCalled = true + return schedule.NewOnce(1), nil + } + pool := newPool(ginkgoutil.NewLogger(), newTestMetrics(), nil, conf) + ctx, cancel := context.WithCancel(context.Background()) + newInstanceSchedule, err := pool.buildNewInstanceSchedule(context.Background(), cancel) + Expect(err).NotTo(HaveOccurred()) + + schedule, err := newInstanceSchedule() + Expect(err).NotTo(HaveOccurred()) + + Expect(newInstanceSchedule()).To(Equal(schedule)) + + Expect(ctx.Done()).NotTo(BeClosed()) + _, ok := schedule.Next() + Expect(ok).To(BeTrue()) + Expect(ctx.Done()).NotTo(BeClosed()) + _, ok = schedule.Next() + Expect(ok).To(BeFalse()) + Expect(ctx.Done()).To(BeClosed()) + }) + +}) diff --git a/core/engine/instance.go b/core/engine/instance.go new file mode 100644 index 000000000..fe93b6c5f --- /dev/null +++ b/core/engine/instance.go @@ -0,0 +1,117 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package engine + +import ( + "context" + "io" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" + "github.com/yandex/pandora/lib/tag" +) + +type instance struct { + log *zap.Logger + id int + gun core.Gun + schedule core.Schedule + instanceSharedDeps +} + +func newInstance(ctx context.Context, log *zap.Logger, id int, deps instanceDeps) (*instance, error) { + log = log.With(zap.Int("instance", id)) + gunDeps := core.GunDeps{ctx, log, id} + sched, err := deps.newSchedule() + if err != nil { + return nil, err + } + gun, err := deps.newGun() + if err != nil { + return nil, err + } + err = gun.Bind(deps.aggregator, gunDeps) + if err != nil { + return nil, err + } + inst := &instance{log, id, gun, sched, deps.instanceSharedDeps} + return inst, nil +} + +type instanceDeps struct { + aggregator core.Aggregator + newSchedule func() (core.Schedule, error) + newGun func() (core.Gun, error) + instanceSharedDeps +} + +type instanceSharedDeps struct { + provider core.Provider + Metrics +} + +// Run blocks until ammo finish, error or context cancel. +// Expects, that gun is already bind. +func (i *instance) Run(ctx context.Context) error { + i.log.Debug("Instance started") + i.InstanceStart.Add(1) + defer func() { + defer i.log.Debug("Instance finished") + i.InstanceFinish.Add(1) + }() + + return i.shoot(ctx) +} + +func (i *instance) shoot(ctx context.Context) (err error) { + defer func() { + r := recover() + if r != nil { + err = errors.Errorf("shoot panic: %s", r) + } + }() + + waiter := coreutil.NewWaiter(i.schedule, ctx) + // Checking, that schedule is not finished, required, to not consume extra ammo, + // on finish in case of per instance schedule. + for !waiter.IsFinished() { + ammo, ok := i.provider.Acquire() + if !ok { + i.log.Debug("Out of ammo") + break + } + if tag.Debug { + i.log.Debug("Ammo acquired", zap.Any("ammo", ammo)) + } + if !waiter.Wait() { + break + } + i.Metrics.Request.Add(1) + if tag.Debug { + i.log.Debug("Shooting", zap.Any("ammo", ammo)) + } + i.gun.Shoot(ammo) + i.Metrics.Response.Add(1) + i.provider.Release(ammo) + } + return ctx.Err() +} + +func (i *instance) Close() error { + gunCloser, ok := i.gun.(io.Closer) + if !ok { + return nil + } + err := gunCloser.Close() + if err != nil { + i.log.Warn("Gun close fail", zap.Error(err)) + } + i.log.Debug("Gun closed") + return err +} diff --git a/core/engine/instance_test.go b/core/engine/instance_test.go new file mode 100644 index 000000000..23629705d --- /dev/null +++ b/core/engine/instance_test.go @@ -0,0 +1,182 @@ +package engine + +import ( + "context" + "errors" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + + "github.com/yandex/pandora/core" + coremock "github.com/yandex/pandora/core/mocks" + "github.com/yandex/pandora/core/schedule" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +var _ = Describe("Instance", func() { + var ( + provider *coremock.Provider + aggregator *coremock.Aggregator + sched core.Schedule + newScheduleErr error + gun *coremock.Gun + newGunErr error + ctx context.Context + metrics Metrics + + ins *instance + insCreateErr error + + newSchedule func() (core.Schedule, error) + newGun func() (core.Gun, error) + ) + + BeforeEach(func() { + provider = &coremock.Provider{} + aggregator = &coremock.Aggregator{} + gun = &coremock.Gun{} + newGunErr = nil + sched = &coremock.Schedule{} + newScheduleErr = nil + ctx = context.Background() + metrics = newTestMetrics() + newSchedule = func() (core.Schedule, error) { return sched, newScheduleErr } + newGun = func() (core.Gun, error) { return gun, newGunErr } + }) + + JustBeforeEach(func() { + deps := instanceDeps{ + aggregator, + newSchedule, + newGun, + instanceSharedDeps{ + provider, + metrics, + }, + } + ins, insCreateErr = newInstance(ctx, ginkgoutil.NewLogger(), 0, deps) + }) + + AfterEach(func() { + if newGunErr == nil && newScheduleErr == nil { + Expect(metrics.InstanceStart.Get()).To(BeEquivalentTo(1)) + Expect(metrics.InstanceFinish.Get()).To(BeEquivalentTo(1)) + } + }) + + Context("all ok", func() { + BeforeEach(func() { + const times = 5 + sched = schedule.NewOnce(times) + gun.On("Bind", aggregator, mock.Anything).Return(nil).Once() + var acquired int + provider.On("Acquire").Return(func() (core.Ammo, bool) { + acquired++ + return acquired, true + }).Times(times) + for i := 1; i <= times; i++ { + gun.On("Shoot", i).Once() + provider.On("Release", i).Once() + } + }) + JustBeforeEach(func() { + Expect(insCreateErr).NotTo(HaveOccurred()) + }) + It("start ok", func() { + err := ins.Run(ctx) + Expect(err).NotTo(HaveOccurred()) + ginkgoutil.AssertExpectations(gun, provider) + }, 2) + + Context("gun implements io.Closer", func() { + var closeGun mockGunCloser + BeforeEach(func() { + closeGun = mockGunCloser{gun} + closeGun.On("Close").Return(nil) + newGun = func() (core.Gun, error) { + return closeGun, nil + } + }) + It("close called on instance close", func() { + err := ins.Run(ctx) + Expect(err).NotTo(HaveOccurred()) + ginkgoutil.AssertNotCalled(closeGun, "Close") + err = ins.Close() + Expect(err).NotTo(HaveOccurred()) + ginkgoutil.AssertExpectations(closeGun, provider) + }) + + }) + }) + + Context("context canceled after run", func() { + BeforeEach(func() { + ctx, _ = context.WithTimeout(ctx, 10*time.Millisecond) + sched := sched.(*coremock.Schedule) + sched.On("Next").Return(time.Now().Add(5*time.Second), true) + sched.On("Left").Return(1) + gun.On("Bind", aggregator, mock.Anything).Return(nil) + provider.On("Acquire").Return(struct{}{}, true) + }) + It("start fail", func() { + err := ins.Run(ctx) + Expect(err).To(Equal(context.DeadlineExceeded)) + ginkgoutil.AssertExpectations(gun, provider) + }, 2) + + }) + + Context("context canceled before run", func() { + BeforeEach(func() { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + cancel() + gun.On("Bind", aggregator, mock.Anything).Return(nil) + }) + It("nothing acquired and schedule not started", func() { + err := ins.Run(ctx) + Expect(err).To(Equal(context.Canceled)) + ginkgoutil.AssertExpectations(gun, provider) + }, 2) + + }) + + Context("schedule create failed", func() { + 
BeforeEach(func() { + sched = nil + newScheduleErr = errors.New("test err") + }) + It("instance create failed", func() { + Expect(insCreateErr).To(Equal(newScheduleErr)) + }) + }) + + Context("gun create failed", func() { + BeforeEach(func() { + gun = nil + newGunErr = errors.New("test err") + }) + It("instance create failed", func() { + Expect(insCreateErr).To(Equal(newGunErr)) + }) + }) + +}) + +type mockGunCloser struct { + *coremock.Gun +} + +func (_m mockGunCloser) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + return r0 +} diff --git a/core/import/import.go b/core/import/import.go new file mode 100644 index 000000000..3bf142802 --- /dev/null +++ b/core/import/import.go @@ -0,0 +1,239 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coreimport + +import ( + "reflect" + + "github.com/spf13/afero" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator" + "github.com/yandex/pandora/core/aggregator/netsample" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/datasink" + "github.com/yandex/pandora/core/datasource" + "github.com/yandex/pandora/core/plugin" + "github.com/yandex/pandora/core/plugin/pluginconfig" + "github.com/yandex/pandora/core/provider" + "github.com/yandex/pandora/core/register" + "github.com/yandex/pandora/core/schedule" + "github.com/yandex/pandora/lib/tag" +) + +const ( + fileDataKey = "file" + compositeScheduleKey = "composite" +) + +func Import(fs afero.Fs) { + + register.DataSink(fileDataKey, func(conf datasink.FileConfig) core.DataSink { + return datasink.NewFile(fs, conf) + }) + const ( + stdoutSinkKey = "stdout" + stderrSinkKey = "stderr" + ) + register.DataSink(stdoutSinkKey, datasink.NewStdout) + register.DataSink(stderrSinkKey, datasink.NewStderr) + AddSinkConfigHook(func(str string) (ok bool, pluginType string, _ map[string]interface{}) { + for _, key := range []string{stdoutSinkKey, stderrSinkKey} { + if str == key { + return true, key, nil + } + } + return + }) + + register.DataSource(fileDataKey, func(conf datasource.FileConfig) core.DataSource { + return datasource.NewFile(fs, conf) + }) + const ( + stdinSourceKey = "stdin" + ) + register.DataSource(stdinSourceKey, datasource.NewStdin) + AddSinkConfigHook(func(str string) (ok bool, pluginType string, _ map[string]interface{}) { + if str != stdinSourceKey { + return + } + return true, stdinSourceKey, nil + }) + register.DataSource("inline", datasource.NewInline) + + // NOTE(skipor): json provider SHOULD NOT used normally. Register your own, that will return + // type that you need, but untyped map. 
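For example, a custom typed JSON provider could be registered like this; the Ammo struct, its JSON tags, and the provider name are hypothetical:

```go
package mygun

import (
	"github.com/yandex/pandora/core"
	coreimport "github.com/yandex/pandora/core/import"
)

// Ammo is a hypothetical typed ammo decoded from JSON lines.
type Ammo struct {
	Tag    string `json:"tag"`
	URI    string `json:"uri"`
	Method string `json:"method"`
}

func init() {
	// Provider "my-ammo" fills *Ammo values instead of a generic untyped map.
	coreimport.RegisterCustomJSONProvider("my-ammo", func() core.Ammo { return &Ammo{} })
}
```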
+ RegisterCustomJSONProvider("json", func() core.Ammo { return map[string]interface{}{} }) + + register.Aggregator("phout", func(conf netsample.PhoutConfig) (core.Aggregator, error) { + a, err := netsample.NewPhout(fs, conf) + return netsample.WrapAggregator(a), err + }, netsample.DefaultPhoutConfig) + register.Aggregator("jsonlines", aggregator.NewJSONLinesAggregator, aggregator.DefaultJSONLinesAggregatorConfig) + register.Aggregator("json", aggregator.NewJSONLinesAggregator, aggregator.DefaultJSONLinesAggregatorConfig) // TODO(skipor): should be done via alias, but we don't have them yet + register.Aggregator("log", aggregator.NewLog) + register.Aggregator("discard", aggregator.NewDiscard) + + register.Limiter("line", schedule.NewLineConf) + register.Limiter("const", schedule.NewConstConf) + register.Limiter("once", schedule.NewOnceConf) + register.Limiter("unlimited", schedule.NewUnlimitedConf) + register.Limiter(compositeScheduleKey, schedule.NewCompositeConf) + + config.AddTypeHook(sinkStringHook) + config.AddTypeHook(scheduleSliceToCompositeConfigHook) + + // Required for decoding plugins. Need to be added after Composite Schedule hacky hook. + pluginconfig.AddHooks() +} + +var ( + scheduleType = plugin.PtrType((*core.Schedule)(nil)) + dataSinkType = plugin.PtrType((*core.DataSink)(nil)) + dataSourceType = plugin.PtrType((*core.DataSource)(nil)) +) + +func isPluginOrFactory(expectedPluginType, actualType reflect.Type) bool { + if actualType.Kind() != reflect.Interface && actualType.Kind() != reflect.Func { + return false + } + factoryPluginType, isPluginFactory := plugin.FactoryPluginType(actualType) + return actualType == expectedPluginType || isPluginFactory && factoryPluginType == expectedPluginType +} + +type PluginConfigStringHook func(str string) (ok bool, pluginType string, conf map[string]interface{}) + +var ( + dataSinkConfigHooks []PluginConfigStringHook + dataSourceConfigHooks []PluginConfigStringHook +) + +func AddSinkConfigHook(hook PluginConfigStringHook) { + dataSinkConfigHooks = append(dataSinkConfigHooks, hook) +} + +func AddSourceConfigHook(hook PluginConfigStringHook) { + dataSourceConfigHooks = append(dataSourceConfigHooks, hook) +} + +func RegisterCustomJSONProvider(name string, newAmmo func() core.Ammo) { + register.Provider(name, func(conf provider.JSONProviderConfig) core.Provider { + return provider.NewJSONProvider(newAmmo, conf) + }, provider.DefaultJSONProviderConfig) +} + +// sourceStringHook helps to decode string as core.DataSource plugin. +// Try use source hooks and use file as fallback. 
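A hypothetical source hook could look like this; the "http" DataSource plugin it selects is assumed to be registered elsewhere:

```go
package mysource

import (
	"strings"

	coreimport "github.com/yandex/pandora/core/import"
)

func init() {
	// URL-like strings select a custom "http" DataSource plugin instead of the
	// file fallback; everything else is left for the other hooks to handle.
	coreimport.AddSourceConfigHook(func(str string) (ok bool, pluginType string, conf map[string]interface{}) {
		if !strings.HasPrefix(str, "http://") && !strings.HasPrefix(str, "https://") {
			return
		}
		return true, "http", map[string]interface{}{"url": str}
	})
}
```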
+func sourceStringHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if !isPluginOrFactory(dataSourceType, t) { + return data, nil + } + if tag.Debug { + zap.L().Debug("DataSource string hook triggered") + } + var ( + ok bool + pluginType string + conf map[string]interface{} + ) + dataStr := data.(string) + + for _, hook := range dataSourceConfigHooks { + ok, pluginType, conf = hook(dataStr) + zap.L().Debug("Source hooked", zap.String("plugin", pluginType)) + if ok { + break + } + } + + if !ok { + zap.L().Debug("Consider source as a file", zap.String("source", dataStr)) + pluginType = fileDataKey + conf = map[string]interface{}{ + "path": data, + } + } + + if conf == nil { + conf = make(map[string]interface{}) + } + conf[pluginconfig.PluginNameKey] = pluginType + + if tag.Debug { + zap.L().Debug("Hooked DataSource config", zap.Any("config", conf)) + } + return conf, nil +} + +// sinkStringHook helps to decode string as core.DataSink plugin. +// Try use sink hooks and use file as fallback. +func sinkStringHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if !isPluginOrFactory(dataSinkType, t) { + return data, nil + } + if tag.Debug { + zap.L().Debug("DataSink string hook triggered") + } + var ( + ok bool + pluginType string + conf map[string]interface{} + ) + dataStr := data.(string) + + for _, hook := range dataSinkConfigHooks { + ok, pluginType, conf = hook(dataStr) + zap.L().Debug("Sink hooked", zap.String("plugin", pluginType)) + if ok { + break + } + } + + if !ok { + zap.L().Debug("Consider sink as a file", zap.String("source", dataStr)) + pluginType = fileDataKey + conf = map[string]interface{}{ + "path": data, + } + } + + if conf == nil { + conf = make(map[string]interface{}) + } + conf[pluginconfig.PluginNameKey] = pluginType + + if tag.Debug { + zap.L().Debug("Hooked DataSink config", zap.Any("config", conf)) + } + return conf, nil +} + +// scheduleSliceToCompositeConfigHook helps to decode []interface{} as core.Schedule plugin. +func scheduleSliceToCompositeConfigHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.Slice { + return data, nil + } + if t.Kind() != reflect.Interface && t.Kind() != reflect.Func { + return data, nil + } + if !isPluginOrFactory(scheduleType, t) { + return data, nil + } + if tag.Debug { + zap.L().Debug("Composite schedule hook triggered") + } + return map[string]interface{}{ + pluginconfig.PluginNameKey: compositeScheduleKey, + "nested": data, + }, nil +} diff --git a/core/import/import_suite_test.go b/core/import/import_suite_test.go new file mode 100644 index 000000000..a39a2163d --- /dev/null +++ b/core/import/import_suite_test.go @@ -0,0 +1,149 @@ +package coreimport + +import ( + "context" + "os" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/coretest" + "github.com/yandex/pandora/core/plugin" + "github.com/yandex/pandora/lib/ginkgoutil" + "github.com/yandex/pandora/lib/testutil" +) + +func TestImport(t *testing.T) { + defer resetGlobals() + Import(afero.NewOsFs()) + ginkgoutil.RunSuite(t, "Import Suite") +} + +var _ = Describe("plugin decode", func() { + Context("composite schedule", func() { + input := func() map[string]interface{} { + return map[string]interface{}{ + "schedule": []map[string]interface{}{ + {"type": "once", "times": 1}, + {"type": "const", "ops": 1, "duration": "1s"}, + }, + } + } + + It("plugin", func() { + var conf struct { + Schedule core.Schedule + } + err := config.Decode(input(), &conf) + Expect(err).NotTo(HaveOccurred()) + coretest.ExpectScheduleNexts(conf.Schedule, 0, 0, time.Second) + }) + + It("plugin factory", func() { + var conf struct { + Schedule func() (core.Schedule, error) + } + err := config.Decode(input(), &conf) + Expect(err).NotTo(HaveOccurred()) + sched, err := conf.Schedule() + Expect(err).NotTo(HaveOccurred()) + coretest.ExpectScheduleNexts(sched, 0, 0, time.Second) + }) + }) +}) + +func TestSink(t *testing.T) { + defer resetGlobals() + fs := afero.NewMemMapFs() + const filename = "/xxx" + Import(fs) + + tests := []struct { + name string + input map[string]interface{} + }{ + {"hooked", testConfig( + "stdout", "stdout", + "stderr", "stderr", + "file", filename, + )}, + {"explicit", testConfig( + "stdout", testConfig("type", "stdout"), + "stderr", testConfig("type", "stderr"), + "file", testConfig( + "type", "file", + "path", filename, + ), + )}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var conf struct { + Stdout func() core.DataSink + Stderr func() core.DataSink + File core.DataSink + } + err := config.Decode(test.input, &conf) + require.NoError(t, err) + coretest.AssertSinkEqualStdStream(t, &os.Stdout, conf.Stdout) + coretest.AssertSinkEqualStdStream(t, &os.Stderr, conf.Stderr) + coretest.AssertSinkEqualFile(t, fs, filename, conf.File) + }) + } +} + +func TestProviderJSONLine(t *testing.T) { + testutil.ReplaceGlobalLogger() + defer resetGlobals() + fs := afero.NewMemMapFs() + const filename = "/xxx" + Import(fs) + input := testConfig( + "aggregator", testConfig( + "type", "jsonlines", + "sink", filename, + ), + ) + + var conf struct { + Aggregator core.Aggregator + } + err := config.Decode(input, &conf) + require.NoError(t, err) + + conf.Aggregator.Report([]int{0, 1, 2}) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err = conf.Aggregator.Run(ctx, core.AggregatorDeps{zap.L()}) + require.NoError(t, err) + + testutil.AssertFileEqual(t, fs, filename, "[0,1,2]\n") +} + +// TODO(skipor): test datasources + +func testConfig(keyValuePairs ...interface{}) map[string]interface{} { + if len(keyValuePairs)%2 != 0 { + panic("invalid len") + } + result := map[string]interface{}{} + for i := 0; i < len(keyValuePairs); i += 2 { + key := keyValuePairs[i].(string) + value := keyValuePairs[i+1] + result[key] = value + } + return result +} + +func resetGlobals() { + plugin.SetDefaultRegistry(plugin.NewRegistry()) + config.SetHooks(config.DefaultHooks()) + testutil.ReplaceGlobalLogger() +} diff --git a/core/mocks/aggregator.go b/core/mocks/aggregator.go new file mode 100644 index 000000000..ecead2740 --- /dev/null +++ 
b/core/mocks/aggregator.go @@ -0,0 +1,30 @@ +// Code generated by mockery v1.0.0 +package coremock + +import "context" +import "github.com/yandex/pandora/core" +import "github.com/stretchr/testify/mock" + +// Aggregator is an autogenerated mock type for the Aggregator type +type Aggregator struct { + mock.Mock +} + +// Report provides a mock function with given fields: s +func (_m *Aggregator) Report(s core.Sample) { + _m.Called(s) +} + +// Run provides a mock function with given fields: ctx, deps +func (_m *Aggregator) Run(ctx context.Context, deps core.AggregatorDeps) error { + ret := _m.Called(ctx, deps) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.AggregatorDeps) error); ok { + r0 = rf(ctx, deps) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/mocks/borrowed_sample.go b/core/mocks/borrowed_sample.go new file mode 100644 index 000000000..b64a546f5 --- /dev/null +++ b/core/mocks/borrowed_sample.go @@ -0,0 +1,14 @@ +// Code generated by mockery v1.0.0 +package coremock + +import mock "github.com/stretchr/testify/mock" + +// BorrowedSample is an autogenerated mock type for the BorrowedSample type +type BorrowedSample struct { + mock.Mock +} + +// Return provides a mock function with given fields: +func (_m *BorrowedSample) Return() { + _m.Called() +} diff --git a/core/mocks/data_sink.go b/core/mocks/data_sink.go new file mode 100644 index 000000000..ada729f56 --- /dev/null +++ b/core/mocks/data_sink.go @@ -0,0 +1,33 @@ +// Code generated by mockery v1.0.0 +package coremock + +import io "io" +import mock "github.com/stretchr/testify/mock" + +// DataSink is an autogenerated mock type for the DataSink type +type DataSink struct { + mock.Mock +} + +// OpenSink provides a mock function with given fields: +func (_m *DataSink) OpenSink() (io.WriteCloser, error) { + ret := _m.Called() + + var r0 io.WriteCloser + if rf, ok := ret.Get(0).(func() io.WriteCloser); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.WriteCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/mocks/data_source.go b/core/mocks/data_source.go new file mode 100644 index 000000000..854c923c0 --- /dev/null +++ b/core/mocks/data_source.go @@ -0,0 +1,33 @@ +// Code generated by mockery v1.0.0 +package coremock + +import io "io" +import mock "github.com/stretchr/testify/mock" + +// DataSource is an autogenerated mock type for the DataSource type +type DataSource struct { + mock.Mock +} + +// OpenSource provides a mock function with given fields: +func (_m *DataSource) OpenSource() (io.ReadCloser, error) { + ret := _m.Called() + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func() io.ReadCloser); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/mocks/extras.go b/core/mocks/extras.go new file mode 100644 index 000000000..f0cae4c83 --- /dev/null +++ b/core/mocks/extras.go @@ -0,0 +1,17 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package coremock + +import ( + "fmt" + "unsafe" +) + +// Implement Stringer, so when Aggregator is passed as arg to another mock call, +// it not read and data races not created. 
+func (_m *Aggregator) String() string { + return fmt.Sprintf("coremock.Aggregator{%v}", unsafe.Pointer(_m)) +} diff --git a/core/mocks/gun.go b/core/mocks/gun.go new file mode 100644 index 000000000..c93ff36b2 --- /dev/null +++ b/core/mocks/gun.go @@ -0,0 +1,29 @@ +// Code generated by mockery v1.0.0 +package coremock + +import core "github.com/yandex/pandora/core" +import mock "github.com/stretchr/testify/mock" + +// Gun is an autogenerated mock type for the Gun type +type Gun struct { + mock.Mock +} + +// Bind provides a mock function with given fields: aggr, deps +func (_m *Gun) Bind(aggr core.Aggregator, deps core.GunDeps) error { + ret := _m.Called(aggr, deps) + + var r0 error + if rf, ok := ret.Get(0).(func(core.Aggregator, core.GunDeps) error); ok { + r0 = rf(aggr, deps) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Shoot provides a mock function with given fields: ammo +func (_m *Gun) Shoot(ammo core.Ammo) { + _m.Called(ammo) +} diff --git a/core/mocks/provider.go b/core/mocks/provider.go new file mode 100644 index 000000000..d94bd028a --- /dev/null +++ b/core/mocks/provider.go @@ -0,0 +1,59 @@ +// Code generated by mockery v1.0.0 +package coremock + +import context "context" +import core "github.com/yandex/pandora/core" +import mock "github.com/stretchr/testify/mock" + +// Provider is an autogenerated mock type for the Provider type +type Provider struct { + mock.Mock +} + +// Acquire provides a mock function with given fields: +func (_m *Provider) Acquire() (core.Ammo, bool) { + ret := _m.Called() + + if len(ret) == 1 { + if rf, ok := ret.Get(0).(func() (core.Ammo, bool)); ok { + return rf() + } + } + + var r0 core.Ammo + if rf, ok := ret.Get(0).(func() core.Ammo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.Ammo) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Release provides a mock function with given fields: ammo +func (_m *Provider) Release(ammo core.Ammo) { + _m.Called(ammo) +} + +// Run provides a mock function with given fields: ctx, deps +func (_m *Provider) Run(ctx context.Context, deps core.ProviderDeps) error { + ret := _m.Called(ctx, deps) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.ProviderDeps) error); ok { + r0 = rf(ctx, deps) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/mocks/schedule.go b/core/mocks/schedule.go new file mode 100644 index 000000000..291b4554c --- /dev/null +++ b/core/mocks/schedule.go @@ -0,0 +1,56 @@ +// Code generated by mockery v1.0.0 +package coremock + +import mock "github.com/stretchr/testify/mock" +import time "time" + +// Schedule is an autogenerated mock type for the Schedule type +type Schedule struct { + mock.Mock +} + +// Left provides a mock function with given fields: +func (_m *Schedule) Left() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Next provides a mock function with given fields: +func (_m *Schedule) Next() (time.Time, bool) { + ret := _m.Called() + + if len(ret) == 1 { + if rf, ok := ret.Get(0).(func() (time.Time, bool)); ok { + return rf() + } + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + var r1 bool + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// 
Start provides a mock function with given fields: startAt
+func (_m *Schedule) Start(startAt time.Time) {
+	_m.Called(startAt)
+}
diff --git a/core/plugin/constructor.go b/core/plugin/constructor.go
new file mode 100644
index 000000000..399befd7d
--- /dev/null
+++ b/core/plugin/constructor.go
@@ -0,0 +1,192 @@
+// Copyright (c) 2017 Yandex LLC. All rights reserved.
+// Use of this source code is governed by a MPL 2.0
+// license that can be found in the LICENSE file.
+// Author: Vladimir Skipor
+
+package plugin
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// implConstructor represents the ability to create implementations of some plugin
+// interface, and factories for them. Usually it wraps some newPlugin or newFactory function
+// and uses it as the implementation creator.
+// implConstructor expects that the caller passes a correct maybeConf value, one that can be
+// passed to the underlying implementation creator.
+type implConstructor interface {
+	// NewPlugin constructs plugin implementation.
+	NewPlugin(maybeConf []reflect.Value) (plugin interface{}, err error)
+	// getMaybeConf may be nil, if no config is required.
+	// The underlying implementation creator may require a new config for every instance created.
+	// If so, then getMaybeConf will be called on every instance create. Otherwise, only once.
+	NewFactory(factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) (pluginFactory interface{}, err error)
+}
+
+func newImplConstructor(pluginType reflect.Type, constructor interface{}) implConstructor {
+	constructorType := reflect.TypeOf(constructor)
+	expect(constructorType.Kind() == reflect.Func, "plugin constructor should be func")
+	expect(constructorType.NumOut() >= 1,
+		"plugin constructor should return plugin implementation as first output parameter")
+	if constructorType.Out(0).Kind() == reflect.Func {
+		return newFactoryConstructor(pluginType, constructor)
+	}
+	return newPluginConstructor(pluginType, constructor)
+}
+
+func newPluginConstructor(pluginType reflect.Type, newPlugin interface{}) *pluginConstructor {
+	expectPluginConstructor(pluginType, reflect.TypeOf(newPlugin), true)
+	return &pluginConstructor{pluginType, reflect.ValueOf(newPlugin)}
+}
+
+// pluginConstructor uses newPlugin func([config <configType>]) (<pluginImpl>[, error])
+// to construct plugin implementations.
+type pluginConstructor struct {
+	pluginType reflect.Type
+	newPlugin  reflect.Value
+}
+
+func (c *pluginConstructor) NewPlugin(maybeConf []reflect.Value) (plugin interface{}, err error) {
+	out := c.newPlugin.Call(maybeConf)
+	plugin = out[0].Interface()
+	if len(out) > 1 {
+		err, _ = out[1].Interface().(error)
+	}
+	return
+}
+
+func (c *pluginConstructor) NewFactory(factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) (interface{}, error) {
+	if c.newPlugin.Type() == factoryType {
+		return c.newPlugin.Interface(), nil
+	}
+	return reflect.MakeFunc(factoryType, func(in []reflect.Value) []reflect.Value {
+		var maybeConf []reflect.Value
+		if getMaybeConf != nil {
+			var err error
+			maybeConf, err = getMaybeConf()
+			if err != nil {
+				switch factoryType.NumOut() {
+				case 1:
+					panic(err)
+				case 2:
+					return []reflect.Value{reflect.Zero(c.pluginType), reflect.ValueOf(&err).Elem()}
+				default:
+					panic(fmt.Sprintf("out params num expected to be 1 or 2, but have: %v", factoryType.NumOut()))
+				}
+			}
+		}
+		out := c.newPlugin.Call(maybeConf)
+		return convertFactoryOutParams(c.pluginType, factoryType.NumOut(), out)
+	}).Interface(), nil
+}
+
+// factoryConstructor uses newFactory func([config <configType>]) (func() (<pluginImpl>[, error])[, error])
+// to construct plugin implementations. +type factoryConstructor struct { + pluginType reflect.Type + newFactory reflect.Value +} + +func newFactoryConstructor(pluginType reflect.Type, newFactory interface{}) *factoryConstructor { + newFactoryType := reflect.TypeOf(newFactory) + expect(newFactoryType.Kind() == reflect.Func, "factory constructor should be func") + expect(newFactoryType.NumIn() <= 1, "factory constructor should accept config or nothing") + + expect(1 <= newFactoryType.NumOut() && newFactoryType.NumOut() <= 2, + "factory constructor should return factory, and optionally error") + if newFactoryType.NumOut() == 2 { + expect(newFactoryType.Out(1) == errorType, "factory constructor should have no second return value, or it should be error") + } + factoryType := newFactoryType.Out(0) + expectPluginConstructor(pluginType, factoryType, false) + return &factoryConstructor{pluginType, reflect.ValueOf(newFactory)} +} + +func (c *factoryConstructor) NewPlugin(maybeConf []reflect.Value) (plugin interface{}, err error) { + factory, err := c.callNewFactory(maybeConf) + if err != nil { + return nil, err + } + out := factory.Call(nil) + plugin = out[0].Interface() + if len(out) > 1 { + err, _ = out[1].Interface().(error) + } + return +} + +func (c *factoryConstructor) NewFactory(factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) (interface{}, error) { + var maybeConf []reflect.Value + if getMaybeConf != nil { + var err error + maybeConf, err = getMaybeConf() + if err != nil { + return nil, err + } + } + factory, err := c.callNewFactory(maybeConf) + if err != nil { + return nil, err + } + if factory.Type() == factoryType { + return factory.Interface(), nil + } + return reflect.MakeFunc(factoryType, func(in []reflect.Value) []reflect.Value { + out := factory.Call(nil) + return convertFactoryOutParams(c.pluginType, factoryType.NumOut(), out) + }).Interface(), nil +} + +func (c *factoryConstructor) callNewFactory(maybeConf []reflect.Value) (factory reflect.Value, err error) { + factoryAndMaybeErr := c.newFactory.Call(maybeConf) + if len(factoryAndMaybeErr) > 1 { + err, _ = factoryAndMaybeErr[1].Interface().(error) + } + return factoryAndMaybeErr[0], err +} + +// expectPluginConstructor checks type expectations common for newPlugin, and factory, returned from newFactory. +func expectPluginConstructor(pluginType, factoryType reflect.Type, configAllowed bool) { + expect(factoryType.Kind() == reflect.Func, "plugin constructor should be func") + if configAllowed { + expect(factoryType.NumIn() <= 1, "plugin constructor should accept config or nothing") + } else { + expect(factoryType.NumIn() == 0, "plugin constructor returned from newFactory, shouldn't accept any arguments") + } + expect(1 <= factoryType.NumOut() && factoryType.NumOut() <= 2, + "plugin constructor should return plugin implementation, and optionally error") + pluginImplType := factoryType.Out(0) + expect(pluginImplType.Implements(pluginType), "plugin constructor should implement plugin interface") + if factoryType.NumOut() == 2 { + expect(factoryType.Out(1) == errorType, "plugin constructor should have no second return value, or it should be error") + } +} + +// convertFactoryOutParams converts output params of some factory (newPlugin) call to required. +func convertFactoryOutParams(pluginType reflect.Type, numOut int, out []reflect.Value) []reflect.Value { + switch numOut { + case 1, 2: + // OK. 
+ default: + panic(fmt.Sprintf("unexpeced out params num: %v; 1 or 2 expected", numOut)) + } + if out[0].Type() != pluginType { + // Not plugin, but its implementation. + impl := out[0] + out[0] = reflect.New(pluginType).Elem() + out[0].Set(impl) + } + if len(out) < numOut { + // Registered factory returns no error, but we should. + out = append(out, reflect.Zero(errorType)) + } + if numOut < len(out) { + // Registered factory returns error, but we should not. + if !out[1].IsNil() { + panic(out[1].Interface()) + } + out = out[:1] + } + return out +} diff --git a/core/plugin/constructor_test.go b/core/plugin/constructor_test.go new file mode 100644 index 000000000..f51b94401 --- /dev/null +++ b/core/plugin/constructor_test.go @@ -0,0 +1,360 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + "errors" + "fmt" + "reflect" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +var _ = Describe("plugin constructor", func() { + DescribeTable("expectations failed", + func(newPlugin interface{}) { + defer recoverExpectationFail() + newPluginConstructor(ptestType(), newPlugin) + }, + Entry("not func", + errors.New("that is not constructor")), + Entry("not implements", + func() struct{} { panic("") }), + Entry("too many args", + func(_, _ ptestConfig) ptestPlugin { panic("") }), + Entry("too many return valued", + func() (_ ptestPlugin, _, _ error) { panic("") }), + Entry("second return value is not error", + func() (_, _ ptestPlugin) { panic("") }), + ) + + Context("new plugin", func() { + newPlugin := func(newPlugin interface{}, maybeConf []reflect.Value) (interface{}, error) { + testee := newPluginConstructor(ptestType(), newPlugin) + return testee.NewPlugin(maybeConf) + } + + It("", func() { + plugin, err := newPlugin(ptestNew, nil) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("more that plugin", func() { + plugin, err := newPlugin(ptestNewMoreThan, nil) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("config", func() { + plugin, err := newPlugin(ptestNewConf, confToMaybe(ptestDefaultConf())) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestDefaultValue) + }) + + It("failed", func() { + plugin, err := newPlugin(ptestNewErrFailing, nil) + Expect(err).To(Equal(ptestCreateFailedErr)) + Expect(plugin).To(BeNil()) + }) + }) + + Context("new factory", func() { + newFactoryOK := func(newPlugin interface{}, factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) interface{} { + testee := newPluginConstructor(ptestType(), newPlugin) + factory, err := testee.NewFactory(factoryType, getMaybeConf) + Expect(err).NotTo(HaveOccurred()) + return factory + } + + It("same type - no wrap", func() { + factory := newFactoryOK(ptestNew, ptestNewType(), nil) + expectSameFunc(factory, ptestNew) + }) + + It(" new impl", func() { + factory := newFactoryOK(ptestNewImpl, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("more than", func() { + factory := newFactoryOK(ptestNewMoreThan, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + 
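As an aside, here is a compact sketch of the two constructor shapes these tests exercise; the `Limiter` names are invented for illustration and are not part of the change.

```go
package example

// Limiter is a stand-in plugin interface.
type Limiter interface{ Start() }

type limiter struct{ rate float64 }

func (l *limiter) Start() {}

// LimiterConfig is a stand-in config struct.
type LimiterConfig struct{ Rate float64 }

// newPlugin shape: func([config]) (pluginImpl[, error]).
func NewLimiter(conf LimiterConfig) (*limiter, error) {
	return &limiter{rate: conf.Rate}, nil
}

// newFactory shape: func([config]) (func() (pluginImpl[, error])[, error]).
func NewLimiterFactory(conf LimiterConfig) func() *limiter {
	return func() *limiter { return &limiter{rate: conf.Rate} }
}
```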
It("add err", func() { + factory := newFactoryOK(ptestNew, ptestNewErrType(), nil) + f, ok := factory.(func() (ptestPlugin, error)) + Expect(ok).To(BeTrue()) + plugin, err := f() + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("trim nil err", func() { + factory := newFactoryOK(ptestNewErr, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("config", func() { + factory := newFactoryOK(ptestNewConf, ptestNewType(), confToGetMaybe(ptestDefaultConf())) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestDefaultValue) + }) + + It("new factory, get config failed", func() { + factory := newFactoryOK(ptestNewConf, ptestNewErrType(), errToGetMaybe(ptestConfigurationFailedErr)) + f, ok := factory.(func() (ptestPlugin, error)) + Expect(ok).To(BeTrue()) + plugin, err := f() + Expect(err).To(Equal(ptestConfigurationFailedErr)) + Expect(plugin).To(BeNil()) + }) + + It("no err, get config failed, throw panic", func() { + factory := newFactoryOK(ptestNewConf, ptestNewType(), errToGetMaybe(ptestConfigurationFailedErr)) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + func() { + defer func() { + r := recover() + Expect(r).To(Equal(ptestConfigurationFailedErr)) + }() + f() + }() + }) + + It("panic on trim non nil err", func() { + factory := newFactoryOK(ptestNewErrFailing, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + func() { + defer func() { + r := recover() + Expect(r).To(Equal(ptestCreateFailedErr)) + }() + f() + }() + }) + + }) +}) + +var _ = Describe("factory constructor", func() { + DescribeTable("expectations failed", + func(newPlugin interface{}) { + defer recoverExpectationFail() + newFactoryConstructor(ptestType(), newPlugin) + }, + Entry("not func", + errors.New("that is not constructor")), + Entry("returned not func", + func() error { panic("") }), + Entry("too many args", + func(_, _ ptestConfig) func() ptestPlugin { panic("") }), + Entry("too many return valued", + func() (func() ptestPlugin, error, error) { panic("") }), + Entry("second return value is not error", + func() (func() ptestPlugin, ptestPlugin) { panic("") }), + Entry("factory accepts conf", + func() func(config ptestConfig) ptestPlugin { panic("") }), + Entry("not implements", + func() func() struct{} { panic("") }), + Entry("factory too many args", + func() func(_, _ ptestConfig) ptestPlugin { panic("") }), + Entry("factory too many return valued", + func() func() (_ ptestPlugin, _, _ error) { panic("") }), + Entry("factory second return value is not error", + func() func() (_, _ ptestPlugin) { panic("") }), + ) + + Context("new plugin", func() { + newPlugin := func(newFactory interface{}, maybeConf []reflect.Value) (interface{}, error) { + testee := newFactoryConstructor(ptestType(), newFactory) + return testee.NewPlugin(maybeConf) + } + + It("", func() { + plugin, err := newPlugin(ptestNewFactory, nil) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("impl", func() { + plugin, err := newPlugin(ptestNewFactoryImpl, nil) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("impl more than", func() { + plugin, err := newPlugin(ptestNewFactoryMoreThan, nil) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + 
It("config", func() { + plugin, err := newPlugin(ptestNewFactoryConf, confToMaybe(ptestDefaultConf())) + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestDefaultValue) + }) + + It("failed", func() { + plugin, err := newPlugin(ptestNewFactoryErrFailing, nil) + Expect(err).To(Equal(ptestCreateFailedErr)) + Expect(plugin).To(BeNil()) + }) + + It("factory failed", func() { + plugin, err := newPlugin(ptestNewFactoryFactoryErrFailing, nil) + Expect(err).To(Equal(ptestCreateFailedErr)) + Expect(plugin).To(BeNil()) + }) + }) + + Context("new factory", func() { + newFactory := func(newFactory interface{}, factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) (interface{}, error) { + testee := newFactoryConstructor(ptestType(), newFactory) + return testee.NewFactory(factoryType, getMaybeConf) + } + newFactoryOK := func(newF interface{}, factoryType reflect.Type, getMaybeConf func() ([]reflect.Value, error)) interface{} { + factory, err := newFactory(newF, factoryType, getMaybeConf) + Expect(err).NotTo(HaveOccurred()) + return factory + } + + It("no err, same type - no wrap", func() { + factory := newFactoryOK(ptestNewFactory, ptestNewType(), nil) + expectSameFunc(factory, ptestNew) + }) + + It("has err, same type - no wrap", func() { + factory := newFactoryOK(ptestNewFactoryFactoryErr, ptestNewErrType(), nil) + expectSameFunc(factory, ptestNewErr) + }) + + It("from new impl", func() { + factory := newFactoryOK(ptestNewFactoryImpl, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("from new impl", func() { + factory := newFactoryOK(ptestNewFactoryMoreThan, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("add err", func() { + factory := newFactoryOK(ptestNewFactory, ptestNewErrType(), nil) + f, ok := factory.(func() (ptestPlugin, error)) + Expect(ok).To(BeTrue()) + plugin, err := f() + Expect(err).NotTo(HaveOccurred()) + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("factory construction not failed", func() { + factory := newFactoryOK(ptestNewFactoryErr, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("trim nil err", func() { + factory := newFactoryOK(ptestNewFactoryFactoryErr, ptestNewType(), nil) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestInitValue) + }) + + It("config", func() { + factory := newFactoryOK(ptestNewFactoryConf, ptestNewType(), confToGetMaybe(ptestDefaultConf())) + f, ok := factory.(func() ptestPlugin) + Expect(ok).To(BeTrue()) + plugin := f() + ptestExpectConfigValue(plugin, ptestDefaultValue) + }) + + It("get config failed", func() { + factory, err := newFactory(ptestNewFactoryConf, ptestNewErrType(), errToGetMaybe(ptestConfigurationFailedErr)) + Expect(err).To(Equal(ptestConfigurationFailedErr)) + Expect(factory).To(BeNil()) + }) + + It("factory create failed", func() { + factory, err := newFactory(ptestNewFactoryErrFailing, ptestNewErrType(), nil) + Expect(err).To(Equal(ptestCreateFailedErr)) + Expect(factory).To(BeNil()) + }) + + It("plugin create failed", func() { + factory := newFactoryOK(ptestNewFactoryFactoryErrFailing, ptestNewErrType(), nil) + f, ok := factory.(func() (ptestPlugin, error)) + 
Expect(ok).To(BeTrue())
+			plugin, err := f()
+			Expect(err).To(Equal(ptestCreateFailedErr))
+			Expect(plugin).To(BeNil())
+		})
+
+		It("panic on trim non nil err", func() {
+			factory := newFactoryOK(ptestNewFactoryFactoryErrFailing, ptestNewType(), nil)
+			f, ok := factory.(func() ptestPlugin)
+			Expect(ok).To(BeTrue())
+			func() {
+				defer func() {
+					r := recover()
+					Expect(r).To(Equal(ptestCreateFailedErr))
+				}()
+				f()
+			}()
+		})
+
+	})
+})
+
+func confToMaybe(conf interface{}) []reflect.Value {
+	if conf != nil {
+		return []reflect.Value{reflect.ValueOf(conf)}
+	}
+	return nil
+}
+
+func confToGetMaybe(conf interface{}) func() ([]reflect.Value, error) {
+	return func() ([]reflect.Value, error) {
+		return confToMaybe(conf), nil
+	}
+}
+
+func errToGetMaybe(err error) func() ([]reflect.Value, error) {
+	return func() ([]reflect.Value, error) {
+		return nil, err
+	}
+}
+
+func expectSameFunc(f1, f2 interface{}) {
+	s1 := fmt.Sprint(f1)
+	s2 := fmt.Sprint(f2)
+	Expect(s1).To(Equal(s2))
+}
diff --git a/core/plugin/doc.go b/core/plugin/doc.go
new file mode 100644
index 000000000..59b90d8df
--- /dev/null
+++ b/core/plugin/doc.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2017 Yandex LLC. All rights reserved.
+// Use of this source code is governed by a MPL 2.0
+// license that can be found in the LICENSE file.
+// Author: Vladimir Skipor
+
+// Package plugin provides a generic inversion of control model for making
+// extensible Go packages, libraries, and applications. It is like
+// github.com/progrium/go-extpoints, but reflect based: it doesn't require code
+// generation, but has more overhead; it provides more flexibility, but less type
+// safety. It allows registering a constructor for some plugin interface, and creating
+// new plugin instances or plugin instance factories.
+// The main feature is flexible plugin configuration: a plugin constructor can
+// accept a config struct, which is filled by a passed hook. Config default
+// values can be provided by registering a default config factory.
+// Such flexibility can be used to decode structured text (json/yaml/etc) into
+// a struct.
+//
+// Type expectations.
+// Here and below, <label> denotes some type expectation.
+// [some type signature part] means that this part of the type signature is optional.
+//
+// Plugin type, let's label it as <plugin>, should be an interface.
+// A registered plugin constructor should be one of: <newPlugin> or <newFactory>.
+// <newPlugin> should have type func([config <configType>]) (<pluginImpl>[, error]).
+// <newFactory> should have type func([config <configType>]) (func() (<pluginImpl>[, error])[, error]).
+// <pluginImpl> should be assignable to <plugin>.
+// That is, <plugin> methods should be a subset of <pluginImpl> methods. In other words, <pluginImpl> should be
+// some <plugin> implementation, or an interface that contains <plugin> methods as a subset.
+// The <configType> type should be a struct or a struct pointer.
+package plugin
diff --git a/core/plugin/example_test.go b/core/plugin/example_test.go
new file mode 100644
index 000000000..b2f1954cb
--- /dev/null
+++ b/core/plugin/example_test.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Yandex LLC. All rights reserved.
+// Use of this source code is governed by a MPL 2.0
+// license that can be found in the LICENSE file.
+// Author: Vladimir Skipor
+
+package plugin_test
+
+import "github.com/yandex/pandora/core/plugin"
+
+type Plugin interface {
+	DoSmth()
+}
+
+func RegisterPlugin(name string, newPluginImpl interface{}, newDefaultConfigOptional ...interface{}) {
+	var p Plugin
+	plugin.Register(plugin.PtrType(&p), name, newPluginImpl, newDefaultConfigOptional...)
+} + +func ExampleRegister() { + type Conf struct{ Smth string } + New := func(Conf) Plugin { panic("") } + RegisterPlugin("no-default", New) + RegisterPlugin("default", New, func() Conf { return Conf{"example"} }) +} diff --git a/core/plugin/plugin.go b/core/plugin/plugin.go new file mode 100644 index 000000000..1d7f00619 --- /dev/null +++ b/core/plugin/plugin.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + "fmt" + "reflect" +) + +// DefaultRegistry returns default Registry used for package *Registry methods like functions. +func DefaultRegistry() *Registry { + return defaultRegistry +} + +// SetDefaultRegistry sets default Registry used for package *Registry methods like functions. +func SetDefaultRegistry(registry *Registry) { + defaultRegistry = registry +} + +// Register is DefaultRegistry().Register shortcut. +func Register( + pluginType reflect.Type, + name string, + newPluginImpl interface{}, + defaultConfigOptional ...interface{}, +) { + DefaultRegistry().Register(pluginType, name, newPluginImpl, defaultConfigOptional...) +} + +// Lookup is DefaultRegistry().Lookup shortcut. +func Lookup(pluginType reflect.Type) bool { + return DefaultRegistry().Lookup(pluginType) +} + +// LookupFactory is DefaultRegistry().LookupFactory shortcut. +func LookupFactory(factoryType reflect.Type) bool { + return DefaultRegistry().LookupFactory(factoryType) +} + +// New is DefaultRegistry().New shortcut. +func New(pluginType reflect.Type, name string, fillConfOptional ...func(conf interface{}) error) (plugin interface{}, err error) { + return defaultRegistry.New(pluginType, name, fillConfOptional...) +} + +// NewFactory is DefaultRegistry().NewFactory shortcut. +func NewFactory(factoryType reflect.Type, name string, fillConfOptional ...func(conf interface{}) error) (factory interface{}, err error) { + return defaultRegistry.NewFactory(factoryType, name, fillConfOptional...) +} + +// PtrType is helper to extract plugin types. +// Example: plugin.PtrType((*PluginInterface)(nil)) instead of +// reflect.TypeOf((*PluginInterface)(nil)).Elem() +func PtrType(ptr interface{}) reflect.Type { + t := reflect.TypeOf(ptr) + if t.Kind() != reflect.Ptr { + panic("passed value is not pointer") + } + return t.Elem() +} + +// FactoryPluginType returns (SomeInterface, true) if factoryType looks like func() (SomeInterface[, error]) +// or (nil, false) otherwise. 
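A short usage sketch of the FactoryPluginType helper introduced here; io.Reader is chosen arbitrarily for illustration and is not part of the change.

```go
package main

import (
	"fmt"
	"io"
	"reflect"

	"github.com/yandex/pandora/core/plugin"
)

func main() {
	newReader := func() (io.Reader, error) { return nil, nil }
	if t, ok := plugin.FactoryPluginType(reflect.TypeOf(newReader)); ok {
		fmt.Println(t) // io.Reader
	}

	// Functions that take arguments, or whose first result is not an interface,
	// are not factory types.
	_, ok := plugin.FactoryPluginType(reflect.TypeOf(func(n int) io.Reader { return nil }))
	fmt.Println(ok) // false
}
```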
+func FactoryPluginType(factoryType reflect.Type) (plugin reflect.Type, ok bool) { + if isFactoryType(factoryType) { + return factoryType.Out(0), true + } + return +} + +// isFactoryType returns true, if type looks like func() (SomeInterface[, error]) +func isFactoryType(t reflect.Type) bool { + hasProperParamsNum := t.Kind() == reflect.Func && + t.NumIn() == 0 && + (t.NumOut() == 1 || t.NumOut() == 2) + if !hasProperParamsNum { + return false + } + if t.Out(0).Kind() != reflect.Interface { + return false + } + if t.NumOut() == 1 { + return true + } + return t.Out(1) == errorType +} + +var defaultRegistry = NewRegistry() + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +func expect(b bool, msg string, args ...interface{}) { + if !b { + panic(fmt.Sprintf("expectation failed: "+msg, args...)) + } +} diff --git a/core/plugin/plugin_suite_test.go b/core/plugin/plugin_suite_test.go new file mode 100644 index 000000000..0cb918eb6 --- /dev/null +++ b/core/plugin/plugin_suite_test.go @@ -0,0 +1,16 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestPlugin(t *testing.T) { + ginkgoutil.RunSuite(t, "Plugin Suite") +} diff --git a/core/plugin/plugin_test.go b/core/plugin/plugin_test.go new file mode 100644 index 000000000..a3c9e37b5 --- /dev/null +++ b/core/plugin/plugin_test.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Default registry", func() { + BeforeEach(func() { + Register(ptestType(), ptestPluginName, ptestNewImpl) + }) + AfterEach(func() { + defaultRegistry = NewRegistry() + }) + It("lookup", func() { + Expect(Lookup(ptestType())).To(BeTrue()) + }) + It("lookup factory", func() { + Expect(LookupFactory(ptestNewErrType())).To(BeTrue()) + }) + It("new", func() { + plugin, err := New(ptestType(), ptestPluginName) + Expect(err).NotTo(HaveOccurred()) + Expect(plugin).NotTo(BeNil()) + }) + It("new factory", func() { + pluginFactory, err := NewFactory(ptestNewErrType(), ptestPluginName) + Expect(err).NotTo(HaveOccurred()) + Expect(pluginFactory).NotTo(BeNil()) + }) +}) + +var _ = Describe("type helpers", func() { + It("ptr type", func() { + var plugin ptestPlugin + Expect(PtrType(&plugin)).To(Equal(ptestType())) + }) + It("factory plugin type ok", func() { + factoryPlugin, ok := FactoryPluginType(ptestNewErrType()) + Expect(ok).To(BeTrue()) + Expect(factoryPlugin).To(Equal(ptestType())) + }) +}) diff --git a/core/plugin/pluginconfig/hooks.go b/core/plugin/pluginconfig/hooks.go new file mode 100644 index 000000000..fe669bb49 --- /dev/null +++ b/core/plugin/pluginconfig/hooks.go @@ -0,0 +1,124 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// Package pluginconfig contains integration plugin with config packages. +// Doing such integration in different package allows to config and plugin packages +// not depend on each other, and set hooks when their are really needed. 
+package pluginconfig + +import ( + "fmt" + "reflect" + "strings" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/plugin" + "github.com/yandex/pandora/lib/tag" +) + +func AddHooks() { + config.AddTypeHook(Hook) + config.AddTypeHook(FactoryHook) +} + +const PluginNameKey = "type" + +func Hook(f reflect.Type, t reflect.Type, data interface{}) (p interface{}, err error) { + if !plugin.Lookup(t) { + return data, nil + } + name, fillConf, err := parseConf(t, data) + if err != nil { + return + } + return plugin.New(t, name, fillConf) +} + +func FactoryHook(f reflect.Type, t reflect.Type, data interface{}) (p interface{}, err error) { + if !plugin.LookupFactory(t) { + return data, nil + } + name, fillConf, err := parseConf(t, data) + if err != nil { + return + } + return plugin.NewFactory(t, name, fillConf) +} + +func parseConf(t reflect.Type, data interface{}) (name string, fillConf func(conf interface{}) error, err error) { + if tag.Debug { + zap.L().Debug("Parsing plugin config", + zap.Stringer("plugin", t), + zap.Reflect("conf", data), + ) + } + confData, err := toStringKeyMap(data) + if err != nil { + return + } + var names []string + for key, val := range confData { + if PluginNameKey == strings.ToLower(key) { + strVal, ok := val.(string) + if !ok { + err = errors.Errorf("%s has non-string value %s", PluginNameKey, val) + return + } + names = append(names, strVal) + delete(confData, key) + } + } + if len(names) == 0 { + err = errors.Errorf("plugin %s expected", PluginNameKey) + return + } + if len(names) > 1 { + err = errors.Errorf("too many %s keys", PluginNameKey) + return + } + name = names[0] + fillConf = func(conf interface{}) error { + if tag.Debug { + zap.L().Debug("Decoding plugin", + zap.String("name", name), + zap.Stringer("type", t), + zap.Stringer("config type", reflect.TypeOf(conf).Elem()), + zap.String("config data", fmt.Sprint(confData)), + ) + } + err := config.DecodeAndValidate(confData, conf) + if err != nil { + err = errors.Errorf("%s %s plugin\n"+ + "%s from %v %s", + t, name, reflect.TypeOf(conf).Elem(), confData, err) + } + return err + } + return +} + +func toStringKeyMap(data interface{}) (out map[string]interface{}, err error) { + out, ok := data.(map[string]interface{}) + if ok { + return + } + untypedKeyData, ok := data.(map[interface{}]interface{}) + if !ok { + err = errors.Errorf("unexpected config type %T: should be map[string or interface{}]interface{}", data) + return + } + out = make(map[string]interface{}, len(untypedKeyData)) + for key, val := range untypedKeyData { + strKey, ok := key.(string) + if !ok { + err = errors.Errorf("unexpected key type %T: %v", key, key) + } + out[strKey] = val + } + return +} diff --git a/core/plugin/pluginconfig/hooks_test.go b/core/plugin/pluginconfig/hooks_test.go new file mode 100644 index 000000000..7237d31e2 --- /dev/null +++ b/core/plugin/pluginconfig/hooks_test.go @@ -0,0 +1,104 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
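To make the decode path concrete, here is a self-contained sketch of how a config map with a "type" key is turned into a plugin instance by these hooks. The Greeter interface and the "simple" plugin name are invented for illustration; this assumes the hooks behave as described above.

```go
package main

import (
	"fmt"

	"github.com/yandex/pandora/core/config"
	"github.com/yandex/pandora/core/plugin"
	"github.com/yandex/pandora/core/plugin/pluginconfig"
)

type Greeter interface{ Hello() string }

type greeter struct{ conf GreeterConfig }

func (g greeter) Hello() string { return "hello, " + g.conf.Name }

type GreeterConfig struct{ Name string }

func main() {
	pluginconfig.AddHooks()
	plugin.Register(plugin.PtrType((*Greeter)(nil)), "simple",
		func(c GreeterConfig) Greeter { return greeter{c} })

	var out struct{ Greeter Greeter }
	err := config.Decode(map[string]interface{}{
		"greeter": map[string]interface{}{
			"type": "simple", // consumed by the hook to pick the registered constructor
			"name": "world",  // the remaining keys fill GreeterConfig
		},
	}, &out)
	fmt.Println(out.Greeter.Hello(), err) // hello, world <nil>
}
```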
+// Author: Vladimir Skipor + +package pluginconfig + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/yandex/pandora/core/config" + "github.com/yandex/pandora/core/plugin" +) + +func init() { + AddHooks() +} + +type TestPlugin interface { + GetData() string +} + +type testPluginImpl struct { + testPluginConf +} + +type testPluginConf struct { + Data string `validate:"max=20"` +} + +func (v *testPluginImpl) GetData() string { return v.Data } + +func TestPluginHooks(t *testing.T) { + dataConf := func(conf interface{}) map[string]interface{} { + return map[string]interface{}{ + "plugin": conf, + } + } + const pluginName = "test_hook_plugin" + plugin.Register(reflect.TypeOf((*TestPlugin)(nil)).Elem(), pluginName, func(c testPluginConf) TestPlugin { return &testPluginImpl{c} }) + + const expectedData = "expected data" + + validConfig := func() interface{} { + return dataConf(map[interface{}]interface{}{ + PluginNameKey: pluginName, + "data": expectedData, + }) + } + invalidConfigs := []map[interface{}]interface{}{ + {}, + { + PluginNameKey: pluginName, + strings.ToUpper(PluginNameKey): pluginName, + }, + { + PluginNameKey: pluginName, + "data": expectedData, + "unused": "wtf", + }, + { + PluginNameKey: pluginName, + "data": "invalid because is toooooo looooong", + }, + } + testInvalid := func(t *testing.T, data interface{}) { + for _, tc := range invalidConfigs { + t.Run(fmt.Sprintf("Invalid conf: %v", tc), func(t *testing.T) { + err := config.Decode(dataConf(tc), data) + assert.Error(t, err) + }) + } + } + + t.Run("plugin", func(t *testing.T) { + var data struct { + Plugin TestPlugin + } + err := config.Decode(validConfig(), &data) + require.NoError(t, err) + assert.Equal(t, expectedData, data.Plugin.GetData(), expectedData) + + testInvalid(t, data) + }) + + t.Run("factory", func(t *testing.T) { + var data struct { + Plugin func() (TestPlugin, error) + } + require.True(t, plugin.LookupFactory(plugin.PtrType(&data.Plugin))) + err := config.Decode(validConfig(), &data) + require.NoError(t, err) + plugin, err := data.Plugin() + require.NoError(t, err) + assert.Equal(t, expectedData, plugin.GetData(), expectedData) + + testInvalid(t, data) + }) +} diff --git a/core/plugin/ptest_test.go b/core/plugin/ptest_test.go new file mode 100644 index 000000000..cfae16f84 --- /dev/null +++ b/core/plugin/ptest_test.go @@ -0,0 +1,109 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + "reflect" + + . "github.com/onsi/gomega" + "github.com/pkg/errors" + + "github.com/yandex/pandora/core/config" +) + +// ptest contains examples and utils for testing plugin pkg + +const ( + ptestPluginName = "ptest_name" + + ptestInitValue = "ptest_INITIAL" + ptestDefaultValue = "ptest_DEFAULT_CONFIG" + ptestFilledValue = "ptest_FILLED" +) + +func (r *Registry) ptestRegister(constructor interface{}, newDefaultConfigOptional ...interface{}) { + r.Register(ptestType(), ptestPluginName, constructor, newDefaultConfigOptional...) +} +func (r *Registry) ptestNew(fillConfOptional ...func(conf interface{}) error) (plugin interface{}, err error) { + return r.New(ptestType(), ptestPluginName, fillConfOptional...) 
+} +func (r *Registry) ptestNewFactory(fillConfOptional ...func(conf interface{}) error) (plugin interface{}, err error) { + factory, err := r.NewFactory(ptestNewErrType(), ptestPluginName, fillConfOptional...) + if err != nil { + return + } + typedFactory := factory.(func() (ptestPlugin, error)) + return typedFactory() +} + +var ( + ptestCreateFailedErr = errors.New("test plugin create failed") + ptestConfigurationFailedErr = errors.New("test plugin configuration failed") +) + +type ptestPlugin interface { + DoSomething() +} +type ptestMoreThanPlugin interface { + ptestPlugin + DoSomethingElse() +} +type ptestImpl struct{ Value string } +type ptestConfig struct{ Value string } + +func (p *ptestImpl) DoSomething() {} +func (p *ptestImpl) DoSomethingElse() {} + +func ptestNew() ptestPlugin { return ptestNewImpl() } +func ptestNewMoreThan() ptestMoreThanPlugin { return ptestNewImpl() } +func ptestNewImpl() *ptestImpl { return &ptestImpl{Value: ptestInitValue} } +func ptestNewConf(c ptestConfig) ptestPlugin { return &ptestImpl{c.Value} } +func ptestNewPtrConf(c *ptestConfig) ptestPlugin { return &ptestImpl{c.Value} } +func ptestNewErr() (ptestPlugin, error) { return &ptestImpl{Value: ptestInitValue}, nil } +func ptestNewErrFailing() (ptestPlugin, error) { return nil, ptestCreateFailedErr } + +func ptestNewFactory() func() ptestPlugin { return ptestNew } +func ptestNewFactoryMoreThan() func() ptestMoreThanPlugin { return ptestNewMoreThan } +func ptestNewFactoryImpl() func() *ptestImpl { return ptestNewImpl } +func ptestNewFactoryConf(c ptestConfig) func() ptestPlugin { + return func() ptestPlugin { + return ptestNewConf(c) + } +} +func ptestNewFactoryPtrConf(c *ptestConfig) func() ptestPlugin { + return func() ptestPlugin { + return ptestNewPtrConf(c) + } +} +func ptestNewFactoryErr() (func() ptestPlugin, error) { return ptestNew, nil } +func ptestNewFactoryErrFailing() (func() ptestPlugin, error) { return nil, ptestCreateFailedErr } +func ptestNewFactoryFactoryErr() func() (ptestPlugin, error) { return ptestNewErr } +func ptestNewFactoryFactoryErrFailing() func() (ptestPlugin, error) { return ptestNewErrFailing } + +func ptestDefaultConf() ptestConfig { return ptestConfig{ptestDefaultValue} } +func ptestNewDefaultPtrConf() *ptestConfig { return &ptestConfig{ptestDefaultValue} } + +func ptestType() reflect.Type { return PtrType((*ptestPlugin)(nil)) } +func ptestNewErrType() reflect.Type { return reflect.TypeOf(ptestNewErr) } +func ptestNewType() reflect.Type { return reflect.TypeOf(ptestNew) } + +func ptestFillConf(conf interface{}) error { + return config.Decode(map[string]interface{}{"Value": ptestFilledValue}, conf) +} + +func ptestExpectConfigValue(conf interface{}, val string) { + conf.(ptestConfChecker).expectConfValue(val) +} + +type ptestConfChecker interface { + expectConfValue(string) +} + +var _ ptestConfChecker = ptestConfig{} +var _ ptestConfChecker = &ptestImpl{} + +func (c ptestConfig) expectConfValue(val string) { Expect(c.Value).To(Equal(val)) } +func (p *ptestImpl) expectConfValue(val string) { Expect(p.Value).To(Equal(val)) } diff --git a/core/plugin/registry.go b/core/plugin/registry.go new file mode 100644 index 000000000..4ad36d59f --- /dev/null +++ b/core/plugin/registry.go @@ -0,0 +1,246 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor
+
+package plugin
+
+import (
+	"reflect"
+
+	"github.com/pkg/errors"
+)
+
+func NewRegistry() *Registry {
+	return &Registry{make(map[reflect.Type]nameRegistry)}
+}
+
+type Registry struct {
+	typeToNameReg map[reflect.Type]nameRegistry
+}
+
+func newNameRegistry() nameRegistry { return make(nameRegistry) }
+
+type nameRegistry map[string]nameRegistryEntry
+
+type nameRegistryEntry struct {
+	constructor   implConstructor
+	defaultConfig defaultConfigContainer
+}
+
+// Register registers a plugin constructor and an optional default config factory
+// for the given plugin interface type and plugin name.
+// See the package doc for type expectation details.
+// Register is designed to be called in a package init func, so it panics if something goes wrong.
+// Panics if type expectations are violated.
+// Panics if some constructor has already been registered for this (pluginType, name) pair.
+// Register is not thread safe.
+//
+// If the constructor receives a config argument, a default config factory can be
+// registered. The default config factory type should be func() <configType>.
+// The default config factory is optional. If no default config factory has been
+// registered, then the plugin constructor will receive a zero config (zero struct or
+// pointer to zero struct).
+// A registered constructor will never receive a nil config, even if there
+// is no registered default config factory, or the default config is nil. The config
+// will be a pointer to a zero config in such a case.
+func (r *Registry) Register(
+	pluginType reflect.Type,
+	name string,
+	constructor interface{},
+	defaultConfigOptional ...interface{}, // default config factory, or nothing.
+) {
+	expect(pluginType.Kind() == reflect.Interface, "plugin type should be interface, but have: %T", pluginType)
+	expect(name != "", "empty name")
+	nameReg := r.typeToNameReg[pluginType]
+	if nameReg == nil {
+		nameReg = newNameRegistry()
+		r.typeToNameReg[pluginType] = nameReg
+	}
+	_, ok := nameReg[name]
+	expect(!ok, "plugin %s with name %q has already been registered", pluginType, name)
+	defaultConfig := getNewDefaultConfig(defaultConfigOptional)
+	nameReg[name] = newNameRegistryEntry(pluginType, constructor, defaultConfig)
+}
+
+// Lookup returns true if any plugin constructor has been registered for the given
+// type.
+func (r *Registry) Lookup(pluginType reflect.Type) bool {
+	_, ok := r.typeToNameReg[pluginType]
+	return ok
+}
+
+// LookupFactory returns true if factoryType looks like func() (SomeInterface[, error])
+// and any plugin constructor has been registered for SomeInterface.
+// That is, you may create an instance of this factoryType using this registry.
+func (r *Registry) LookupFactory(factoryType reflect.Type) bool {
+	return isFactoryType(factoryType) && r.Lookup(factoryType.Out(0))
+}
+
+// New creates a plugin using a registered plugin constructor. It returns an error if creation
+// failed or no plugin was registered for the given type and name.
+// The passed fillConf is called on the created config before calling the plugin constructor.
+// The fillConf argument is always a valid struct pointer, even if the plugin constructor
+// receives no config: fillConf is called on an empty struct pointer in such a case.
+// A fillConf error fails plugin creation.
+// New is thread safe, if there are no concurrent Register calls.
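A hypothetical registration that exercises the API described above; the Writer interface and the "stub" name are made up, so treat this as a sketch rather than Pandora code.

```go
package example

import "github.com/yandex/pandora/core/plugin"

type Writer interface{ Write(s string) }

type writer struct{ conf WriterConfig }

func (w writer) Write(s string) { /* no-op for the sketch */ }

type WriterConfig struct{ Dest string }

func init() {
	// Constructor plus optional default config factory, as documented above.
	plugin.Register(plugin.PtrType((*Writer)(nil)), "stub",
		func(c WriterConfig) Writer { return writer{c} },
		func() WriterConfig { return WriterConfig{Dest: "stdout"} },
	)
}

func NewStub() (Writer, error) {
	// fillConf is optional; omitted here, so the default config is used as is.
	w, err := plugin.New(plugin.PtrType((*Writer)(nil)), "stub")
	if err != nil {
		return nil, err
	}
	return w.(Writer), nil
}
```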
+func (r *Registry) New(pluginType reflect.Type, name string, fillConfOptional ...func(conf interface{}) error) (plugin interface{}, err error) { + expect(pluginType.Kind() == reflect.Interface, "plugin type should be interface, but have: %T", pluginType) + expect(name != "", "empty name") + fillConf := getFillConf(fillConfOptional) + registered, err := r.get(pluginType, name) + if err != nil { + return + } + conf, err := registered.defaultConfig.Get(fillConf) + if err != nil { + return nil, err + } + return registered.constructor.NewPlugin(conf) +} + +// NewFactory behaves like New, but creates factory func() (PluginInterface[, error]), that on call +// creates New plugin by registered factory. +// If registered constructor is config is created filled for every factory call, +// if . + newValue reflect.Value +} + +func newDefaultConfigContainer(constructorType reflect.Type, defaultConfig interface{}) defaultConfigContainer { + if constructorType.NumIn() == 0 { + expect(defaultConfig == nil, "constructor accept no config, but defaultConfig passed") + return defaultConfigContainer{} + } + expect(constructorType.NumIn() == 1, "constructor should accept zero or one argument") + configType := constructorType.In(0) + expect(configType.Kind() == reflect.Struct || + configType.Kind() == reflect.Ptr && configType.Elem().Kind() == reflect.Struct, + "unexpected config kind: %s; should be struct or struct pointer") + newDefaultConfigType := reflect.FuncOf(nil, []reflect.Type{configType}, false) + if defaultConfig == nil { + value := reflect.MakeFunc(newDefaultConfigType, + func(_ []reflect.Value) (results []reflect.Value) { + // OPTIMIZE: create addressable. + return []reflect.Value{reflect.Zero(configType)} + }) + return defaultConfigContainer{value} + } + value := reflect.ValueOf(defaultConfig) + expect(value.Type() == newDefaultConfigType, + "defaultConfig should be func that accepts nothing, and returns constructor argument, but have type %T", defaultConfig) + return defaultConfigContainer{value} +} + +// In reflect pkg []Value used to call functions. It's easier to return it, that convert from pointer when needed. +func (e defaultConfigContainer) Get(fillConf func(fillAddr interface{}) error) (maybeConf []reflect.Value, err error) { + var fillAddr interface{} + if e.configRequired() { + maybeConf, fillAddr = e.new() + } else { + var emptyStruct struct{} + fillAddr = &emptyStruct // No fields to fill. + } + if fillConf != nil { + err = fillConf(fillAddr) + if err != nil { + return nil, err + } + } + return +} + +func (e defaultConfigContainer) new() (maybeConf []reflect.Value, fillAddr interface{}) { + if !e.configRequired() { + panic("try to create config when not required") + } + conf := e.newValue.Call(nil)[0] + switch conf.Kind() { + case reflect.Struct: + // Config can be filled only by pointer. + if !conf.CanAddr() { + // Can't address to pass pointer into decoder. Let's make New addressable! + newArg := reflect.New(conf.Type()).Elem() + newArg.Set(conf) + conf = newArg + } + fillAddr = conf.Addr().Interface() + case reflect.Ptr: + if conf.IsNil() { + // Can't fill nil config. Init with zero. 
+ conf = reflect.New(conf.Type().Elem()) + } + fillAddr = conf.Interface() + default: + panic("unexpected type " + conf.String()) + } + maybeConf = []reflect.Value{conf} + return +} + +func (e defaultConfigContainer) configRequired() bool { + return e.newValue.IsValid() +} + +func getFillConf(fillConfOptional []func(conf interface{}) error) func(interface{}) error { + expect(len(fillConfOptional) <= 1, "only fill config parameter could be passed") + if len(fillConfOptional) == 0 { + return nil + } + return fillConfOptional[0] +} + +func getNewDefaultConfig(newDefaultConfigOptional []interface{}) interface{} { + expect(len(newDefaultConfigOptional) <= 1, "too many arguments passed") + if len(newDefaultConfigOptional) == 0 { + return nil + } + return newDefaultConfigOptional[0] +} diff --git a/core/plugin/registry_test.go b/core/plugin/registry_test.go new file mode 100644 index 000000000..5eb1da323 --- /dev/null +++ b/core/plugin/registry_test.go @@ -0,0 +1,301 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package plugin + +import ( + "io" + "reflect" + + "github.com/mitchellh/mapstructure" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + "github.com/pkg/errors" +) + +var _ = Describe("new default config container", func() { + DescribeTable("expectation fail", + func(constructor interface{}, newDefaultConfigOptional ...interface{}) { + newDefaultConfig := getNewDefaultConfig(newDefaultConfigOptional) + defer recoverExpectationFail() + newDefaultConfigContainer(reflect.TypeOf(constructor), newDefaultConfig) + }, + Entry("invalid type", + func(int) ptestPlugin { return nil }), + Entry("invalid ptr type", + func(*int) ptestPlugin { return nil }), + Entry("to many args", + func(_, _ ptestConfig) ptestPlugin { return nil }), + Entry("default without config", + func() ptestPlugin { return nil }, func() *ptestConfig { return nil }), + Entry("invalid default config", + func(ptestConfig) ptestPlugin { return nil }, func() *ptestConfig { return nil }), + Entry("default config accepts args", + func(*ptestConfig) ptestPlugin { return nil }, func(int) *ptestConfig { return nil }), + ) + + DescribeTable("expectation ok", + func(constructor interface{}, newDefaultConfigOptional ...interface{}) { + newDefaultConfig := getNewDefaultConfig(newDefaultConfigOptional) + container := newDefaultConfigContainer(reflect.TypeOf(constructor), newDefaultConfig) + conf, err := container.Get(ptestFillConf) + Expect(err).NotTo(HaveOccurred()) + Expect(conf).To(HaveLen(1)) + ptestExpectConfigValue(conf[0].Interface(), ptestFilledValue) + }, + Entry("no default config", + ptestNewConf), + Entry("no default ptr config", + ptestNewPtrConf), + Entry("default config", + ptestNewConf, ptestDefaultConf), + Entry("default ptr config", + ptestNewPtrConf, ptestNewDefaultPtrConf), + ) + + It("fill no config failed", func() { + container := newDefaultConfigContainer(ptestNewErrType(), nil) + _, err := container.Get(ptestFillConf) + Expect(err).To(HaveOccurred()) + }) +}) + +var _ = Describe("registry", func() { + It("register name collision panics", func() { + r := NewRegistry() + r.ptestRegister(ptestNewImpl) + defer recoverExpectationFail() + r.ptestRegister(ptestNewImpl) + }) + + It("lookup", func() { + r := NewRegistry() + r.ptestRegister(ptestNewImpl) + Expect(r.Lookup(ptestType())).To(BeTrue()) + 
Expect(r.Lookup(reflect.TypeOf(0))).To(BeFalse()) + Expect(r.Lookup(reflect.TypeOf(&ptestImpl{}))).To(BeFalse()) + Expect(r.Lookup(reflect.TypeOf((*io.Writer)(nil)).Elem())).To(BeFalse()) + }) + + It("lookup factory", func() { + r := NewRegistry() + r.ptestRegister(ptestNewImpl) + Expect(r.LookupFactory(ptestNewType())).To(BeTrue()) + Expect(r.LookupFactory(ptestNewErrType())).To(BeTrue()) + + Expect(r.LookupFactory(reflect.TypeOf(0))).To(BeFalse()) + Expect(r.LookupFactory(reflect.TypeOf(&ptestImpl{}))).To(BeFalse()) + Expect(r.LookupFactory(reflect.TypeOf((*io.Writer)(nil)).Elem())).To(BeFalse()) + }) + +}) + +var _ = Describe("new", func() { + type New func(r *Registry, fillConfOptional ...func(conf interface{}) error) (interface{}, error) + var ( + r *Registry + testNew New + testNewOk = func(fillConfOptional ...func(conf interface{}) error) (pluginVal string) { + plugin, err := testNew(r, fillConfOptional...) + Expect(err).NotTo(HaveOccurred()) + return plugin.(*ptestImpl).Value + } + ) + BeforeEach(func() { r = NewRegistry() }) + runTestCases := func() { + Context("plugin constructor", func() { + It("no conf", func() { + r.ptestRegister(ptestNewImpl) + Expect(testNewOk()).To(Equal(ptestInitValue)) + }) + It("nil error", func() { + r.ptestRegister(ptestNewErr) + Expect(testNewOk()).To(Equal(ptestInitValue)) + }) + It("non-nil error", func() { + r.ptestRegister(ptestNewErrFailing) + _, err := testNew(r) + Expect(err).To(HaveOccurred()) + err = errors.Cause(err) + Expect(ptestCreateFailedErr).To(Equal(err)) + }) + It("no conf, fill conf error", func() { + r.ptestRegister(ptestNewImpl) + expectedErr := errors.New("fill conf err") + _, err := testNew(r, func(_ interface{}) error { return expectedErr }) + Expect(expectedErr).To(Equal(err)) + }) + It("no default", func() { + r.ptestRegister(ptestNewConf) + Expect(testNewOk()).To(Equal("")) + }) + It("default", func() { + r.ptestRegister(ptestNewConf, ptestDefaultConf) + Expect(testNewOk()).To(Equal(ptestDefaultValue)) + }) + It("fill conf default", func() { + r.ptestRegister(ptestNewConf, ptestDefaultConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("fill conf no default", func() { + r.ptestRegister(ptestNewConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("fill ptr conf no default", func() { + r.ptestRegister(ptestNewPtrConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("no default ptr conf not nil", func() { + r.ptestRegister(ptestNewPtrConf) + Expect("").To(Equal(testNewOk())) + }) + It("nil default, conf not nil", func() { + r.ptestRegister(ptestNewPtrConf, func() *ptestConfig { return nil }) + Expect(testNewOk()).To(Equal("")) + }) + It("fill nil default", func() { + r.ptestRegister(ptestNewPtrConf, func() *ptestConfig { return nil }) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("more than one fill conf panics", func() { + r.ptestRegister(ptestNewPtrConf) + defer recoverExpectationFail() + testNew(r, ptestFillConf, ptestFillConf) + }) + }) + + Context("factory constructor", func() { + It("no conf", func() { + r.ptestRegister(ptestNewFactory) + Expect(testNewOk()).To(Equal(ptestInitValue)) + }) + It("nil error", func() { + r.ptestRegister(func() (ptestPlugin, error) { + return ptestNewImpl(), nil + }) + Expect(testNewOk()).To(Equal(ptestInitValue)) + }) + It("non-nil error", func() { + r.ptestRegister(ptestNewFactoryFactoryErrFailing) + _, err := testNew(r) + Expect(err).To(HaveOccurred()) + err = errors.Cause(err) + 
Expect(ptestCreateFailedErr).To(Equal(err)) + }) + It("no conf, fill conf error", func() { + r.ptestRegister(ptestNewFactory) + expectedErr := errors.New("fill conf err") + _, err := testNew(r, func(_ interface{}) error { return expectedErr }) + Expect(expectedErr).To(Equal(err)) + }) + It("no default", func() { + r.ptestRegister(ptestNewFactoryConf) + Expect(testNewOk()).To(Equal("")) + }) + It("default", func() { + r.ptestRegister(ptestNewFactoryConf, ptestDefaultConf) + Expect(testNewOk()).To(Equal(ptestDefaultValue)) + }) + It("fill conf default", func() { + r.ptestRegister(ptestNewFactoryConf, ptestDefaultConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("fill conf no default", func() { + r.ptestRegister(ptestNewFactoryConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("fill ptr conf no default", func() { + r.ptestRegister(ptestNewFactoryPtrConf) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("no default ptr conf not nil", func() { + r.ptestRegister(ptestNewFactoryPtrConf) + Expect("").To(Equal(testNewOk())) + }) + It("nil default, conf not nil", func() { + r.ptestRegister(ptestNewFactoryPtrConf, func() *ptestConfig { return nil }) + Expect(testNewOk()).To(Equal("")) + }) + It("fill nil default", func() { + r.ptestRegister(ptestNewFactoryPtrConf, func() *ptestConfig { return nil }) + Expect(testNewOk(ptestFillConf)).To(Equal(ptestFilledValue)) + }) + It("more than one fill conf panics", func() { + r.ptestRegister(ptestNewFactoryPtrConf) + defer recoverExpectationFail() + testNew(r, ptestFillConf, ptestFillConf) + }) + }) + } + Context("use New", func() { + BeforeEach(func() { testNew = (*Registry).ptestNew }) + runTestCases() + + }) + Context("use NewFactory", func() { + BeforeEach(func() { testNew = (*Registry).ptestNewFactory }) + runTestCases() + }) + +}) + +var _ = Describe("decode", func() { + It("ok", func() { + r := NewRegistry() + const nameKey = "type" + + var hook mapstructure.DecodeHookFunc + decode := func(input, result interface{}) error { + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: hook, + ErrorUnused: true, + Result: result, + }) + if err != nil { + return err + } + return decoder.Decode(input) + } + hook = mapstructure.ComposeDecodeHookFunc( + func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if !r.Lookup(to) { + return data, nil + } + // NOTE: could be map[interface{}]interface{} here. + input := data.(map[string]interface{}) + // NOTE: should be case insensitive behaviour. + pluginName := input[nameKey].(string) + delete(input, nameKey) + return r.New(to, pluginName, func(conf interface{}) error { + // NOTE: should error, if conf has "type" field. 
+ return decode(input, conf) + }) + }) + + r.Register(ptestType(), "my-plugin", ptestNewConf, ptestDefaultConf) + input := map[string]interface{}{ + "plugin": map[string]interface{}{ + nameKey: "my-plugin", + "value": ptestFilledValue, + }, + } + type Config struct { + Plugin ptestPlugin + } + var conf Config + err := decode(input, &conf) + Expect(err).NotTo(HaveOccurred()) + actualValue := conf.Plugin.(*ptestImpl).Value + Expect(actualValue).To(Equal(ptestFilledValue)) + }) + +}) + +func recoverExpectationFail() { + r := recover() + Expect(r).NotTo(BeNil()) + Expect(r).To(ContainSubstring("expectation failed")) +} diff --git a/core/provider/chunk_decoder.go b/core/provider/chunk_decoder.go new file mode 100644 index 000000000..0904b4497 --- /dev/null +++ b/core/provider/chunk_decoder.go @@ -0,0 +1,72 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package provider + +import ( + "bufio" + "fmt" + + "github.com/pkg/errors" + + "github.com/yandex/pandora/core" +) + +var ErrNoAmmoDecoded = fmt.Errorf("no ammo has been decoded from chunk") + +// ChunkAmmoDecoder accept data chunks that can contain encoded ammo or some meta information that +// changes ChunkAmmoDecoder state and affects next ammo decoding. +// For example, chunks are lines that contains HTTP URI to be transformed to http.Request or +// HTTP header to be added to next decoded http.Requests. +type ChunkAmmoDecoder interface { + // DecodeChunk accepts chunk of data, than decode it to ammo or change ChunkAmmoDecoder internal + // state. + // Returns nil on when ammo was successfully decoded. + // ErrNoAmmoDecoded MAY be returned, to indicate that chunk was accepted, but ammo were not + // decoded. + // Returns other non nil error, on chunk decode fail. + // Panics if ammo type is not supported. + DecodeChunk(chunk []byte, ammo core.Ammo) error +} + +func NewScanDecoder(scanner Scanner, decoder ChunkAmmoDecoder) *ScanAmmoDecoder { + return &ScanAmmoDecoder{scanner: scanner, decoder: decoder} +} + +// Scanner is interface of bufio.Scanner like scanners. +type Scanner interface { + Scan() bool + Bytes() []byte + Err() error +} + +var _ Scanner = &bufio.Scanner{} + +type ScanAmmoDecoder struct { + chunkCounter int + scanner Scanner + decoder ChunkAmmoDecoder +} + +var _ AmmoDecoder = &ScanAmmoDecoder{} + +func (d *ScanAmmoDecoder) Decode(ammo core.Ammo) error { + for { + if !d.scanner.Scan() { + return d.scanner.Err() + } + chunk := d.scanner.Bytes() + err := d.decoder.DecodeChunk(chunk, ammo) + if err == ErrNoAmmoDecoded { + continue + } + if err != nil { + return errors.Wrapf(err, "chunk %v decode failed", d.chunkCounter) + } + d.chunkCounter++ + return nil + } + +} diff --git a/core/provider/decoder.go b/core/provider/decoder.go new file mode 100644 index 000000000..f6c8735c4 --- /dev/null +++ b/core/provider/decoder.go @@ -0,0 +1,117 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package provider + +import ( + "context" + "fmt" + "io" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/errutil" + "github.com/yandex/pandora/lib/ioutil2" +) + +type NewAmmoDecoder func(deps core.ProviderDeps, source io.Reader) (AmmoDecoder, error) + +// TODO(skipo): test decoder that fills ammo with random data + +// AmmoEncoder MAY support only concrete type of ammo. +// AmmoDecoder SHOULD NOT be used after first decode fail. +type AmmoDecoder interface { + // Decode fills passed ammo with data. + // Returns non nil error on fail. + // Panics if ammo type is not supported. + Decode(ammo core.Ammo) error +} + +type AmmoDecoderFunc func(ammo core.Ammo) error + +func (f AmmoDecoderFunc) Decode(ammo core.Ammo) error { return f(ammo) } + +// TODO(skipor): test + +func NewDecodeProvider(newAmmo func() core.Ammo, newDecoder NewAmmoDecoder, conf DecodeProviderConfig) *DecodeProvider { + return &DecodeProvider{ + AmmoQueue: *NewAmmoQueue(newAmmo, conf.Queue), + newDecoder: newDecoder, + conf: conf, + } +} + +type DecodeProviderConfig struct { + Queue AmmoQueueConfig `config:",squash"` + Source core.DataSource `config:"source" validate:"required"` + // Limit limits total num of ammo. Unlimited if zero. + Limit int `validate:"min=0"` + // Passes limits ammo file passes. Unlimited if zero. + Passes int `validate:"min=0"` +} + +func DefaultDecodeProviderConfig() DecodeProviderConfig { + return DecodeProviderConfig{ + Queue: DefaultAmmoQueueConfig(), + } +} + +type DecodeProvider struct { + AmmoQueue + conf DecodeProviderConfig + newDecoder NewAmmoDecoder + core.ProviderDeps +} + +var _ core.Provider = &DecodeProvider{} + +func (p *DecodeProvider) Run(ctx context.Context, deps core.ProviderDeps) (err error) { + p.ProviderDeps = deps + defer close(p.OutQueue) + source, err := p.conf.Source.OpenSource() + if err != nil { + return errors.WithMessage(err, "data source open failed") + } + defer func() { + errutil.Join(err, errors.Wrap(source.Close(), "data source close failed")) + }() + + // Problem: can't use decoder after io.EOF, because decoder is invalidated. But decoder recreation + // is not efficient, when we have short data source. + // Now problem solved by using MultiPassReader, but in such case decoder don't know real input + // position, so can't put this important information in decode error. + // TODO(skipor): Let's add optional Reset(io.Reader) method, that will allow efficient Decoder reset after every pass. 
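To make the ChunkAmmoDecoder / ScanAmmoDecoder contract above concrete, here is a minimal hypothetical chunk decoder: every non-empty line becomes one ammo, while blank lines are acknowledged with ErrNoAmmoDecoded so the scan loop keeps going. The lineAmmo and lineDecoder names are invented for this sketch, and core.Ammo is assumed to be an empty interface (as the plain-int ammo in the num provider elsewhere in this patch suggests):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/yandex/pandora/core"
	"github.com/yandex/pandora/core/provider"
)

// lineAmmo is an invented ammo type: just the raw line.
type lineAmmo struct{ Line string }

// lineDecoder is a toy ChunkAmmoDecoder: every non-empty chunk is one ammo.
type lineDecoder struct{}

func (lineDecoder) DecodeChunk(chunk []byte, ammo core.Ammo) error {
	if len(chunk) == 0 {
		return provider.ErrNoAmmoDecoded // chunk accepted, but no ammo produced
	}
	ammo.(*lineAmmo).Line = string(chunk)
	return nil
}

func main() {
	scanner := bufio.NewScanner(strings.NewReader("GET /\n\nGET /health\n"))
	decoder := provider.NewScanDecoder(scanner, lineDecoder{})
	for i := 0; i < 2; i++ { // the input above holds exactly two non-empty lines
		var ammo lineAmmo
		if err := decoder.Decode(&ammo); err != nil {
			panic(err)
		}
		fmt.Println(ammo.Line)
	}
}
```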
+ multipassReader := ioutil2.NewMultiPassReader(source, p.conf.Passes) + if source == multipassReader { + p.Log.Info("Ammo data source can't sought, so will be read only once") + } + decoder, err := p.newDecoder(deps, multipassReader) + + if err != nil { + return errors.WithMessage(err, "decoder construction failed") + } + var ammoNum int + for ; p.conf.Limit <= 0 || ammoNum < p.conf.Limit; ammoNum++ { + ammo := p.InputPool.Get() + err = decoder.Decode(ammo) + if err == io.EOF { + p.Log.Info("Ammo finished", zap.Int("decoded", ammoNum)) + return nil + } + if err != nil { + return errors.WithMessage(err, fmt.Sprintf("ammo #%v decode failed", ammoNum)) + } + select { + case p.OutQueue <- ammo: + case <-ctx.Done(): + p.Log.Debug("Provider run context is Done", zap.Int("decoded", ammoNum+1)) + return nil + } + } + p.Log.Info("Ammo limit is reached", zap.Int("decoded", ammoNum)) + return nil +} diff --git a/core/provider/json.go b/core/provider/json.go new file mode 100644 index 000000000..586935c58 --- /dev/null +++ b/core/provider/json.go @@ -0,0 +1,83 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package provider + +import ( + "io" + + jsoniter "github.com/json-iterator/go" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coreutil" + "github.com/yandex/pandora/lib/ioutil2" +) + +// NewJSONProvider returns generic core.Provider that reads JSON data from source and decodes it +// into ammo returned from newAmmo. +func NewJSONProvider(newAmmo func() core.Ammo, conf JSONProviderConfig) core.Provider { + return NewCustomJSONProvider(nil, newAmmo, conf) +} + +// NewCustomJSONProvider is like NewJSONProvider, but also allows to wrap JSON decoder, to +// decode data into intermediate struct, but then transform in into desired ammo. +// For example, decode {"body":"some data"} into struct { Data string }, and transform it to +// http.Request. +func NewCustomJSONProvider(wrapDecoder func(deps core.ProviderDeps, decoder AmmoDecoder) AmmoDecoder, newAmmo func() core.Ammo, conf JSONProviderConfig) core.Provider { + var newDecoder NewAmmoDecoder = func(deps core.ProviderDeps, source io.Reader) (AmmoDecoder, error) { + decoder := NewJSONAmmoDecoder(source, conf.Buffer.BufferSizeOrDefault()) + if wrapDecoder != nil { + decoder = wrapDecoder(deps, decoder) + } + return decoder, nil + } + return NewDecodeProvider(newAmmo, newDecoder, conf.Decode) +} + +type JSONProviderConfig struct { + Decode DecodeProviderConfig `config:",squash"` + Buffer coreutil.BufferSizeConfig `config:",squash"` +} + +func DefaultJSONProviderConfig() JSONProviderConfig { + return JSONProviderConfig{Decode: DefaultDecodeProviderConfig()} +} + +func NewJSONAmmoDecoder(r io.Reader, buffSize int) AmmoDecoder { + var readError error + // HACK(skipor): jsoniter.Iterator don't handle read errors well, but jsoniter.Decoder don't allow to set buffer size. + var errTrackingReader ioutil2.ReaderFunc = func(p []byte) (n int, err error) { + n, err = r.Read(p) + if n > 0 { + // Need to suppress error, to distinguish parse error in last chunk and read error. 
+ return n, nil + } + if err != nil { + readError = err + } + return n, err + } + return &JSONAmmoDecoder{ + iter: jsoniter.Parse(jsoniter.ConfigFastest, errTrackingReader, buffSize), + readErrorPtr: &readError, + } +} + +type JSONAmmoDecoder struct { + iter *jsoniter.Iterator + readErrorPtr *error +} + +func (d *JSONAmmoDecoder) Decode(ammo core.Ammo) error { + coreutil.ResetReusedAmmo(ammo) + d.iter.ReadVal(ammo) + if d.iter.Error != nil { + if *d.readErrorPtr != nil { + return *d.readErrorPtr + } + return d.iter.Error + } + return nil +} diff --git a/core/provider/json_test.go b/core/provider/json_test.go new file mode 100644 index 000000000..eca80711a --- /dev/null +++ b/core/provider/json_test.go @@ -0,0 +1,169 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package provider + +import ( + "context" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/datasource" +) + +type testJSONAmmo struct { + Id string + Data string +} + +// TODO(skipor): test this in decode provider, not json +func TestDecodeProviderPasses(t *testing.T) { + input := strings.NewReader(` {"data":"first"} `) + conf := DefaultJSONProviderConfig() + conf.Decode.Source = datasource.NewReader(input) + conf.Decode.Passes = 3 + newAmmo := func() core.Ammo { + return &testJSONAmmo{} + } + provider := NewJSONProvider(newAmmo, conf) + err := provider.Run(context.Background(), testDeps()) + require.NoError(t, err) + + expected := func(data string) *testJSONAmmo { + return &testJSONAmmo{Data: data} + } + ammo, ok := provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first"), ammo) + + ammo, ok = provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first"), ammo) + + ammo, ok = provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first"), ammo) + + _, ok = provider.Acquire() + assert.False(t, ok) +} + +func TestCustomJSONProvider(t *testing.T) { + input := strings.NewReader(` {"data":"first"}`) + conf := DefaultJSONProviderConfig() + conf.Decode.Source = datasource.NewReader(input) + conf.Decode.Limit = 1 + newAmmo := func() core.Ammo { + return &testJSONAmmo{} + } + wrapDecoder := func(_ core.ProviderDeps, decoder AmmoDecoder) AmmoDecoder { + return AmmoDecoderFunc(func(ammo core.Ammo) error { + err := decoder.Decode(ammo) + if err != nil { + return err + } + ammo.(*testJSONAmmo).Data += " transformed" + return nil + }) + } + provider := NewCustomJSONProvider(wrapDecoder, newAmmo, conf) + err := provider.Run(context.Background(), testDeps()) + require.NoError(t, err) + expected := func(data string) *testJSONAmmo { + return &testJSONAmmo{Data: data} + } + ammo, ok := provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first transformed"), ammo) + + _, ok = provider.Acquire() + assert.False(t, ok) +} + +func TestJSONProvider(t *testing.T) { + input := strings.NewReader(` {"data":"first"} +{"data":"second"} `) + conf := DefaultJSONProviderConfig() + conf.Decode.Source = datasource.NewReader(input) + conf.Decode.Limit = 3 + newAmmo := func() core.Ammo { + return &testJSONAmmo{} + } + provider := NewJSONProvider(newAmmo, conf) + err := provider.Run(context.Background(), testDeps()) + require.NoError(t, err) + + expected := func(data string) *testJSONAmmo { + return &testJSONAmmo{Data: data} + } 
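The ResetReusedAmmo call at the top of JSONAmmoDecoder.Decode above is what the TestDecoderReset case below verifies: ammo objects come from a pool and get reused, and a JSON decode only writes the fields present in the input, so anything left over from a previous shot would survive. A standalone illustration of that pitfall with plain encoding/json (the ammo struct here is invented, and Pandora's own reset helper is not shown in this hunk):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type ammo struct {
	Id   string
	Data string
}

func main() {
	reused := ammo{Id: "stale-id", Data: "stale-data"}

	// The fresh payload has no "Id" field, so without a reset the old Id survives.
	_ = json.Unmarshal([]byte(`{"Data":"fresh"}`), &reused)
	fmt.Printf("%+v\n", reused) // {Id:stale-id Data:fresh}

	// Zeroing the value first (which is what a reset before decode achieves)
	// leaves only what the new payload actually contains.
	reused = ammo{}
	_ = json.Unmarshal([]byte(`{"Data":"fresh"}`), &reused)
	fmt.Printf("%+v\n", reused) // {Id: Data:fresh}
}
```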
+ ammo, ok := provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first"), ammo) + + ammo, ok = provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("second"), ammo) + + ammo, ok = provider.Acquire() + require.True(t, ok) + assert.Equal(t, expected("first"), ammo) + + _, ok = provider.Acquire() + assert.False(t, ok) +} + +func TestDecoderSimple(t *testing.T) { + var val struct { + Data string + } + input := strings.NewReader(`{"data":"first"}`) + decoder := NewJSONAmmoDecoder(input, 512) + err := decoder.Decode(&val) + require.NoError(t, err) + assert.Equal(t, "first", val.Data) + + err = decoder.Decode(&val) + require.Equal(t, io.EOF, err) +} + +func TestDecoderWhitespaces(t *testing.T) { + var val struct { + Data string + } + input := strings.NewReader(` {"data":"first"} + {"data":"second"} {"data":"third"} `) + decoder := NewJSONAmmoDecoder(input, 512) + err := decoder.Decode(&val) + require.NoError(t, err) + assert.Equal(t, "first", val.Data) + + err = decoder.Decode(&val) + require.NoError(t, err) + assert.Equal(t, "second", val.Data) + + err = decoder.Decode(&val) + require.NoError(t, err) + assert.Equal(t, "third", val.Data) + + err = decoder.Decode(&val) + require.Equal(t, io.EOF, err) +} + +func TestDecoderReset(t *testing.T) { + val := testJSONAmmo{ + Id: "id", + } + input := strings.NewReader(`{"data":"first"}`) + decoder := NewJSONAmmoDecoder(input, 512) + err := decoder.Decode(&val) + require.NoError(t, err) + assert.Equal(t, "first", val.Data) + assert.Zero(t, val.Id) +} diff --git a/core/provider/num.go b/core/provider/num.go new file mode 100644 index 000000000..401ca41af --- /dev/null +++ b/core/provider/num.go @@ -0,0 +1,54 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package provider + +import ( + "context" + + "github.com/yandex/pandora/core" +) + +// NewNum returns dummy provider, that provides 0, 1 .. n int sequence as ammo. +// May be useful for test or in when Gun don't need ammo. +func NewNum(limit int) core.Provider { + return &num{ + limit: limit, + sink: make(chan core.Ammo), + } +} + +type NumConfig struct { + Limit int +} + +func NewNumConf(conf NumConfig) core.Provider { + return NewNum(conf.Limit) +} + +type num struct { + i int + limit int + sink chan core.Ammo +} + +func (n *num) Run(ctx context.Context, _ core.ProviderDeps) error { + defer close(n.sink) + for ; n.limit <= 0 || n.i < n.limit; n.i++ { + select { + case n.sink <- n.i: + case <-ctx.Done(): + return nil + } + } + return nil +} + +func (n *num) Acquire() (a core.Ammo, ok bool) { + a, ok = <-n.sink + return +} + +func (n *num) Release(core.Ammo) {} diff --git a/core/provider/num_test.go b/core/provider/num_test.go new file mode 100644 index 000000000..a7722cb67 --- /dev/null +++ b/core/provider/num_test.go @@ -0,0 +1,64 @@ +package provider + +import ( + "context" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/yandex/pandora/core" +) + +var _ = Describe("Num", func() { + var ( + limit int + + p core.Provider + ctx context.Context + cancel context.CancelFunc + runRes chan error + ) + BeforeEach(func() { + limit = 0 + runRes = make(chan error) + }) + JustBeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + p = NewNumConf(NumConfig{limit}) + go func() { + runRes <- p.Run(ctx, core.ProviderDeps{}) + }() + }) + + It("unlimited", func() { + for i := 0; i < 100; i++ { + a, ok := p.Acquire() + Expect(ok).To(BeTrue()) + Expect(a).To(Equal(i)) + } + cancel() + Expect(<-runRes).To(BeNil()) + a, ok := p.Acquire() + Expect(ok).To(BeFalse()) + Expect(a).To(BeNil()) + }, 1) + + Context("unlimited", func() { + BeforeEach(func() { + limit = 50 + }) + It("", func() { + for i := 0; i < limit; i++ { + a, ok := p.Acquire() + Expect(ok).To(BeTrue()) + Expect(a).To(Equal(i)) + } + a, ok := p.Acquire() + Expect(ok).To(BeFalse()) + Expect(a).To(BeNil()) + Expect(<-runRes).To(BeNil()) + }, 1) + + }) + +}) diff --git a/core/provider/provider_suite_test.go b/core/provider/provider_suite_test.go new file mode 100644 index 000000000..9c500eede --- /dev/null +++ b/core/provider/provider_suite_test.go @@ -0,0 +1,16 @@ +package provider + +import ( + "testing" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestProvider(t *testing.T) { + ginkgoutil.RunSuite(t, "AmmoQueue Suite") +} + +func testDeps() core.ProviderDeps { + return core.ProviderDeps{ginkgoutil.NewLogger()} +} diff --git a/core/provider/queue.go b/core/provider/queue.go new file mode 100644 index 000000000..ef208d5d6 --- /dev/null +++ b/core/provider/queue.go @@ -0,0 +1,52 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package provider + +import ( + "sync" + + "github.com/yandex/pandora/core" +) + +type AmmoQueueConfig struct { + // AmmoQueueSize is number maximum number of ready but not acquired ammo. + // On queue overflow, ammo decode is stopped. + AmmoQueueSize int `config:"ammo-queue-size" validate:"min=1"` +} + +const ( + shootsPerSecondUpperBound = 128 * 1024 + DefaultAmmoQueueSize = shootsPerSecondUpperBound / 16 +) + +func DefaultAmmoQueueConfig() AmmoQueueConfig { + return AmmoQueueConfig{ + AmmoQueueSize: DefaultAmmoQueueSize, + } +} + +func NewAmmoQueue(newAmmo func() core.Ammo, conf AmmoQueueConfig) *AmmoQueue { + return &AmmoQueue{ + OutQueue: make(chan core.Ammo, conf.AmmoQueueSize), + InputPool: sync.Pool{New: func() interface{} { + return newAmmo() + }}, + } +} + +type AmmoQueue struct { + OutQueue chan core.Ammo + InputPool sync.Pool +} + +func (p *AmmoQueue) Acquire() (core.Ammo, bool) { + ammo, ok := <-p.OutQueue + return ammo, ok +} + +func (p *AmmoQueue) Release(a core.Ammo) { + p.InputPool.Put(a) +} diff --git a/core/register/register.go b/core/register/register.go new file mode 100644 index 000000000..3fd301d0e --- /dev/null +++ b/core/register/register.go @@ -0,0 +1,45 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package register + +import ( + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/plugin" +) + +func RegisterPtr(ptr interface{}, name string, newPlugin interface{}, defaultConfigOptional ...interface{}) { + plugin.Register(plugin.PtrType(ptr), name, newPlugin, defaultConfigOptional...) +} + +func Provider(name string, newProvider interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.Provider + RegisterPtr(ptr, name, newProvider, defaultConfigOptional...) +} + +func Limiter(name string, newLimiter interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.Schedule + RegisterPtr(ptr, name, newLimiter, defaultConfigOptional...) +} + +func Gun(name string, newGun interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.Gun + RegisterPtr(ptr, name, newGun, defaultConfigOptional...) +} + +func Aggregator(name string, newAggregator interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.Aggregator + RegisterPtr(ptr, name, newAggregator, defaultConfigOptional...) +} + +func DataSource(name string, newDataSource interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.DataSource + RegisterPtr(ptr, name, newDataSource, defaultConfigOptional...) +} + +func DataSink(name string, newDataSink interface{}, defaultConfigOptional ...interface{}) { + var ptr *core.DataSink + RegisterPtr(ptr, name, newDataSink, defaultConfigOptional...) +} diff --git a/core/schedule/composite.go b/core/schedule/composite.go new file mode 100644 index 000000000..249ab70c7 --- /dev/null +++ b/core/schedule/composite.go @@ -0,0 +1,144 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "sync" + "time" + + "github.com/yandex/pandora/core" +) + +type CompositeConf struct { + Nested []core.Schedule `config:"nested"` +} + +func NewCompositeConf(conf CompositeConf) core.Schedule { + return NewComposite(conf.Nested...) +} + +func NewComposite(scheds ...core.Schedule) core.Schedule { + switch len(scheds) { + case 0: + return NewOnce(0) + case 1: + return scheds[0] + } + + var ( + left = make([]int, len(scheds)) + unknown bool // If meet any Left() < 0, all previous leftBefore is unknown + leftAccumulator int // If unknown, then at least leftBefore accumulated, else exactly leftBefore. + ) + for i := len(scheds) - 1; i >= 0; i-- { + left[i] = leftAccumulator + schedLeft := scheds[i].Left() + if schedLeft < 0 { + schedLeft = -1 + unknown = true + leftAccumulator = -1 + } + if !unknown { + leftAccumulator += schedLeft + } + } + + return &compositeSchedule{ + scheds: scheds, + leftAfter: left, + } +} + +type compositeSchedule struct { + // Under read lock, goroutine can read slices, it's values, and call values goroutine safe methods. + // Under write lock, goroutine can do anything. + rwMu sync.RWMutex + scheds []core.Schedule // At least once schedule. First schedule can be finished. + leftAfter []int // Tokens leftBefore, if known exactly, or at least tokens leftBefore otherwise. +} + +func (s *compositeSchedule) Start(startAt time.Time) { + s.rwMu.Lock() + defer s.rwMu.Unlock() + s.scheds[0].Start(startAt) +} +func (s *compositeSchedule) Next() (tx time.Time, ok bool) { + s.rwMu.RLock() + tx, ok = s.scheds[0].Next() + if ok { + s.rwMu.RUnlock() + return // Got token, all is good. 
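These wrappers are what a plugin package calls to make itself visible to the config machinery; the custom gun tutorial later in this patch uses register.Gun in the same way. Below is a hedged sketch of the calling side: the myplugin package, the MyGun/MyProvider names, and the target value are placeholders, and the convention that the optional last argument is a no-arg default-config constructor is inferred from the registry tests above.

```go
package myplugin

import (
	"github.com/yandex/pandora/core"
	"github.com/yandex/pandora/core/register"
)

type GunConfig struct {
	Target string `validate:"required"`
}

// Placeholder constructors; real ones would return working implementations.
func NewMyGun(conf GunConfig) core.Gun { return nil }
func NewMyProvider() core.Provider { return nil }

// Import registers the plugins under the names used in config files.
func Import() {
	// The optional trailing argument is a constructor for the default config.
	register.Gun("my-gun", NewMyGun, func() GunConfig {
		return GunConfig{Target: "localhost:80"}
	})
	register.Provider("my-provider", NewMyProvider)
}
```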
+ } + schedsLeft := len(s.scheds) + s.rwMu.RUnlock() + if schedsLeft == 1 { + return // All nested schedules has been finished, so composite is finished too. + } + // Current schedule is finished, but some are left. + // Let's start next, with got finish time from previous! + s.rwMu.Lock() + schedsLeftNow := len(s.scheds) + somebodyStartedNextBeforeUs := schedsLeftNow < schedsLeft + if somebodyStartedNextBeforeUs { + // Let's just take token. + tx, ok = s.scheds[0].Next() + s.rwMu.Unlock() + if ok || schedsLeftNow == 1 { + return + } + // Very strange. Schedule was started and drained while we was waiting for it. + // Should very rare, so let's just retry. + return s.Next() + } + s.startNext(tx) + tx, ok = s.scheds[0].Next() + s.rwMu.Unlock() + if !ok && schedsLeftNow > 1 { + // What? Schedule without any tokens? Okay, just retry. + return s.Next() + } + return +} + +func (s *compositeSchedule) Left() int { + s.rwMu.RLock() + schedsLeft := len(s.scheds) + leftAfter := int(s.leftAfter[0]) + left := s.scheds[0].Left() + s.rwMu.RUnlock() + if schedsLeft == 1 { + return left + } + if left == 0 { + if leftAfter >= 0 { + return leftAfter + } + // leftAfter was unknown, at schedule create moment. + // But now, it can be finished. Let's shift, and try one more time. + s.rwMu.Lock() + shedsLeftNow := len(s.scheds) + if shedsLeftNow == schedsLeft { + currentFinishTime, ok := s.scheds[0].Next() + if ok { + s.rwMu.Unlock() + panic("current schedule is not finished") + } + s.startNext(currentFinishTime) + } + s.rwMu.Unlock() + return s.Left() + } + if left < 0 { + return -1 + } + return left + leftAfter +} + +func (s *compositeSchedule) startNext(currentFinishTime time.Time) { + s.scheds = s.scheds[1:] + s.leftAfter = s.leftAfter[1:] + s.scheds[0].Start(currentFinishTime) +} diff --git a/core/schedule/composite_test.go b/core/schedule/composite_test.go new file mode 100644 index 000000000..e4e0dc86c --- /dev/null +++ b/core/schedule/composite_test.go @@ -0,0 +1,114 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "sync" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coretest" + "go.uber.org/atomic" +) + +var _ = Describe("composite", func() { + It("empty", func() { + testee := NewComposite() + coretest.ExpectScheduleNexts(testee, 0) + }) + + It("only", func() { + testee := NewComposite(NewConst(1, time.Second)) + coretest.ExpectScheduleNexts(testee, 0, time.Second) + }) + + It("composite", func() { + testee := NewComposite( + NewConst(1, 2*time.Second), + NewOnce(2), + NewConst(0, 5*time.Second), + NewOnce(0), + NewOnce(1), + ) + coretest.ExpectScheduleNexts(testee, + 0, + time.Second, + + 2*time.Second, + 2*time.Second, + + 7*time.Second, + 7*time.Second, // Finish. + ) + }) + + // Load concurrently, and let race detector do it's work. + It("race", func() { + var ( + nexts []core.Schedule + tokensGot atomic.Int64 + tokensExpected int64 + ) + addOnce := func(v int64) { + nexts = append(nexts, NewOnce(v)) + tokensExpected += v + } + addOnce(100000) // Delay to start concurrent readers. + for i := 0; i < 100000; i++ { + // Some work for catching races. 
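Composite is the building block for multi-stage load profiles: each nested schedule is started at the point where the previous one finished. As an illustration (rates and durations are arbitrary), a linear ramp followed by a constant plateau can be composed from the schedules introduced in this patch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/yandex/pandora/core/schedule"
)

func main() {
	// Ramp from 0 to 100 RPS over 5 minutes, then hold 100 RPS for 10 minutes.
	profile := schedule.NewComposite(
		schedule.NewLine(0, 100, 5*time.Minute),
		schedule.NewConst(100, 10*time.Minute),
	)
	profile.Start(time.Now())

	next, ok := profile.Next() // time of the first shot
	fmt.Println(next, ok)
	fmt.Println(profile.Left()) // total tokens left across both stages
}
```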
+ addOnce(0) + addOnce(1) + } + testee := NewCompositeConf(CompositeConf{nexts}) + var wg sync.WaitGroup + for i := 0; i < 8; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + _, ok := testee.Next() + if !ok { + return + } + tokensGot.Inc() + } + }() + } + wg.Wait() + Expect(tokensGot.Load()).To(Equal(tokensExpected)) + }) + + It("left with unknown", func() { + unlimitedDuration := time.Second + testee := NewComposite( + NewUnlimited(unlimitedDuration), + NewOnce(0), + NewConst(1, 2*time.Second), + NewOnce(1), + ) + Expect(testee.Left()).To(Equal(-1)) + startAt := time.Now().Add(-unlimitedDuration) + testee.Start(startAt) + + unlimitedFinish := startAt.Add(unlimitedDuration) + sched := testee.(*compositeSchedule).scheds[0] + Expect(sched.(*unlimitedSchedule).finish).To(Equal(unlimitedFinish)) + + Expect(testee.Left()).To(Equal(3)) + + actualNexts := coretest.DrainScheduleDuration(testee, unlimitedFinish) + expectedNests := []time.Duration{ + 0, + time.Second, + 2 * time.Second, + 2 * time.Second, // Finish. + } + Expect(actualNexts).To(Equal(expectedNests)) + }) +}) diff --git a/core/schedule/const.go b/core/schedule/const.go new file mode 100644 index 000000000..c7dc92fb0 --- /dev/null +++ b/core/schedule/const.go @@ -0,0 +1,38 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "time" + + "github.com/yandex/pandora/core" +) + +type ConstConfig struct { + Ops float64 `validate:"min=0"` + Duration time.Duration `validate:"min-time=1ms"` +} + +func NewConstConf(conf ConstConfig) core.Schedule { + return NewConst(conf.Ops, conf.Duration) +} + +func NewConst(ops float64, duration time.Duration) core.Schedule { + if ops < 0 { + ops = 0 + } + xn := float64(duration) / 1e9 // Seconds. + n := int64(ops * xn) + return NewDoAtSchedule(duration, n, constDoAt(ops)) +} + +func constDoAt(ops float64) func(i int64) time.Duration { + billionDivOps := 1e9 / ops + return func(i int64) time.Duration { + return time.Duration(float64(i) * billionDivOps) + //return time.Duration(float64(i) * 1e9 / ops) + } +} diff --git a/core/schedule/do_at.go b/core/schedule/do_at.go new file mode 100644 index 000000000..e33b8ae23 --- /dev/null +++ b/core/schedule/do_at.go @@ -0,0 +1,64 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "time" + + "go.uber.org/atomic" + + "github.com/yandex/pandora/core" +) + +// DoAt returns when i'th operation should be performed, assuming that schedule +// started at 0. +type DoAt func(i int64) time.Duration + +func NewDoAtSchedule(duration time.Duration, n int64, doAt DoAt) core.Schedule { + return &doAtSchedule{ + duration: duration, + n: n, + doAt: doAt, + } +} + +type doAtSchedule struct { + duration time.Duration + n int64 + i atomic.Int64 + doAt func(i int64) time.Duration + + StartSync + start time.Time +} + +func (s *doAtSchedule) Start(startAt time.Time) { + s.MarkStarted() + s.startOnce.Do(func() { + s.start = startAt + }) +} + +func (s *doAtSchedule) Next() (tx time.Time, ok bool) { + s.startOnce.Do(func() { + // No allocations here due to benchmark. 
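NewDoAtSchedule is the shared engine here: the const schedule above and the line schedule below differ only in the DoAt function they hand it. As an illustration of that contract, a hypothetical step schedule that fires k shots at the start of every second could be expressed the same way; NewStep is not part of this patch, it is a sketch only:

```go
package schedule

import (
	"time"

	"github.com/yandex/pandora/core"
)

// NewStep is a sketch: k shots are due at the very beginning of each second,
// for the whole duration. Shot i belongs to second i/k.
func NewStep(k int64, duration time.Duration) core.Schedule {
	n := k * int64(duration/time.Second)
	return NewDoAtSchedule(duration, n, func(i int64) time.Duration {
		return time.Duration(i/k) * time.Second
	})
}
```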
+ s.MarkStarted() + s.start = time.Now() + }) + i := s.i.Inc() - 1 + if i >= s.n { + return s.start.Add(s.duration), false + } + return s.start.Add(s.doAt(i)), true +} + +func (s *doAtSchedule) Left() int { + left := int(s.n - s.i.Load()) + if left < 0 { + return 0 + } + return left +} diff --git a/core/schedule/line.go b/core/schedule/line.go new file mode 100644 index 000000000..4e9860ebe --- /dev/null +++ b/core/schedule/line.go @@ -0,0 +1,49 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "math" + "time" + + "github.com/yandex/pandora/core" +) + +func NewLine(from, to float64, duration time.Duration) core.Schedule { + if from == to { + return NewConst(from, duration) + } + a := (to - from) / float64(duration/1e9) + b := from + xn := float64(duration) / 1e9 + n := int64(a*xn*xn/2 + b*xn) + return NewDoAtSchedule(duration, n, lineDoAt(a, b)) +} + +type LineConfig struct { + From float64 `validate:"min=0"` + To float64 `validate:"min=0"` + Duration time.Duration `validate:"min-time=1ms"` +} + +func NewLineConf(conf LineConfig) core.Schedule { + return NewLine(conf.From, conf.To, conf.Duration) +} + +// x - duration from 0 to max. +// RPS(x) = a * x + b // Line RPS schedule. +// Number of shots from 0 to x = integral(RPS) from 0 to x = (a*x^2)/2 + b*x +// Has shoot i. When it should be? i = (a*x^2)/2 + b*x => x = (sqrt(2*a*i + b^2) - b) / a +func lineDoAt(a, b float64) func(i int64) time.Duration { + // Some common calculations. + twoA := 2 * a + bSquare := b * b + bilionDivA := 1e9 / a + return func(i int64) time.Duration { + //return time.Duration((math.Sqrt(2*a*float64(i)+b*b) - b) * 1e9 / a) + return time.Duration((math.Sqrt(twoA*float64(i)+bSquare) - b) * bilionDivA) + } +} diff --git a/core/schedule/once.go b/core/schedule/once.go new file mode 100644 index 000000000..67e3be2e9 --- /dev/null +++ b/core/schedule/once.go @@ -0,0 +1,28 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "time" + + "github.com/yandex/pandora/core" +) + +// NewOnce returns schedule that emits all passed operation token at start time. +// That is, is schedule for zero duration, unlimited RPS, and n operations. +func NewOnce(n int64) core.Schedule { + return NewDoAtSchedule(0, n, func(i int64) time.Duration { + return 0 + }) +} + +type OnceConfig struct { + Times int64 `validate:"min=1"` // N is decoded like bool +} + +func NewOnceConf(conf OnceConfig) core.Schedule { + return NewOnce(conf.Times) +} diff --git a/core/schedule/schedule_suite_test.go b/core/schedule/schedule_suite_test.go new file mode 100644 index 000000000..0658482bd --- /dev/null +++ b/core/schedule/schedule_suite_test.go @@ -0,0 +1,234 @@ +package schedule + +import ( + "sort" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/coretest" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestSchedule(t *testing.T) { + ginkgoutil.RunSuite(t, "Schedule Suite") +} + +var _ = Describe("unlimited", func() { + It("", func() { + conf := UnlimitedConfig{50 * time.Millisecond} + testee := NewUnlimitedConf(conf) + start := time.Now() + finish := start.Add(conf.Duration) + Expect(testee.Left()).To(BeEquivalentTo(-1)) + testee.Start(start) + var i int + for prev := time.Now(); ; i++ { + left := testee.Left() + x, ok := testee.Next() + if !ok { + break + } + Expect(left).To(BeEquivalentTo(-1)) + Expect(x).To(BeTemporally(">", prev)) + Expect(x).To(BeTemporally("<", finish)) + } + Expect(testee.Left()).To(BeEquivalentTo(0)) + Expect(i).To(BeNumerically(">", 50)) + }) +}) + +var _ = Describe("once", func() { + It("started", func() { + testee := NewOnce(1) + coretest.ExpectScheduleNexts(testee, 0, 0) + }) + + It("unstarted", func() { + testee := NewOnce(1) + start := time.Now() + x1, ok := testee.Next() + threshold := time.Since(start) + + Expect(ok).To(BeTrue()) + Expect(x1).To(BeTemporally("~", start, threshold)) + + x2, ok := testee.Next() + Expect(ok).To(BeFalse()) + Expect(x2).To(Equal(x1)) + }) + +}) + +var _ = Describe("const", func() { + var ( + conf ConstConfig + testee core.Schedule + underlying *doAtSchedule + ) + + JustBeforeEach(func() { + testee = NewConstConf(conf) + underlying = testee.(*doAtSchedule) + }) + + Context("non-zero ops", func() { + BeforeEach(func() { + conf = ConstConfig{ + Ops: 1, + Duration: 2 * time.Second, + } + }) + It("", func() { + Expect(underlying.n).To(BeEquivalentTo(2)) + coretest.ExpectScheduleNexts(testee, 0, time.Second, 2*time.Second) + }) + }) + + Context("zero ops", func() { + BeforeEach(func() { + conf = ConstConfig{ + Ops: 0, + Duration: 2 * time.Second, + } + }) + It("", func() { + Expect(underlying.n).To(BeEquivalentTo(0)) + coretest.ExpectScheduleNexts(testee, 2*time.Second) + }) + }) +}) + +var _ = Describe("line", func() { + var ( + conf LineConfig + testee core.Schedule + underlying *doAtSchedule + ) + + JustBeforeEach(func() { + testee = NewLineConf(conf) + underlying = testee.(*doAtSchedule) + }) + + Context("too small ops", func() { + BeforeEach(func() { + conf = LineConfig{ + From: 0, + To: 1.999, + Duration: time.Second, + } + }) + It("", func() { + // Too small ops, so should not do anything. 
+ Expect(underlying.n).To(BeEquivalentTo(0)) + coretest.ExpectScheduleNexts(testee, time.Second) + }) + }) + + Context("const ops", func() { + BeforeEach(func() { + conf = LineConfig{ + From: 1, + To: 1, + Duration: 2 * time.Second, + } + }) + + It("", func() { + Expect(underlying.n).To(BeEquivalentTo(2)) + coretest.ExpectScheduleNexts(testee, 0, time.Second, 2*time.Second) + }) + }) + + Context("zero start", func() { + BeforeEach(func() { + conf = LineConfig{ + From: 0, + To: 1, + Duration: 2 * time.Second, + } + }) + + It("", func() { + Expect(underlying.n).To(BeEquivalentTo(1)) + coretest.ExpectScheduleNexts(testee, 0, 2*time.Second) + }) + }) + + Context("non zero start", func() { + BeforeEach(func() { + conf = LineConfig{ + From: 2, + To: 8, + Duration: 2 * time.Second, + } + }) + + It("", func() { + Expect(underlying.n).To(BeEquivalentTo(10)) + start := time.Now() + testee.Start(start) + + var ( + i int + xs []time.Time + x time.Time + ) + for ok := true; ok; i++ { + x, ok = testee.Next() + xs = append(xs, x) + } + Expect(i).To(Equal(11)) + Expect(sort.SliceIsSorted(xs, func(i, j int) bool { + return xs[i].Before(xs[j]) + })).To(BeTrue()) + Expect(start.Add(conf.Duration)).To(Equal(xs[len(xs)-1])) + }) + }) + +}) + +func BenchmarkLineSchedule(b *testing.B) { + schedule := NewLine(0, float64(b.N), 2*time.Second) + benchmarkScheduleNext(b, schedule) +} + +func BenchmarkLineScheduleParallel(b *testing.B) { + schedule := NewLine(0, float64(b.N), 2*time.Second) + benchmarkScheduleNextParallel(b, schedule) +} + +func BenchmarkUnlimitedSchedule(b *testing.B) { + schedule := NewUnlimited(time.Minute) + benchmarkScheduleNext(b, schedule) +} + +func BenchmarkUnlimitedScheduleParallel(b *testing.B) { + schedule := NewUnlimited(time.Minute) + benchmarkScheduleNextParallel(b, schedule) +} + +func benchmarkScheduleNextParallel(b *testing.B, schedule core.Schedule) { + run := func(pb *testing.PB) { + for pb.Next() { + schedule.Next() + } + } + schedule.Start(time.Now()) + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(run) +} + +func benchmarkScheduleNext(b *testing.B, schedule core.Schedule) { + schedule.Start(time.Now()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + schedule.Next() + } +} diff --git a/core/schedule/start_sync.go b/core/schedule/start_sync.go new file mode 100644 index 000000000..4b2b5215f --- /dev/null +++ b/core/schedule/start_sync.go @@ -0,0 +1,29 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package schedule + +import ( + "go.uber.org/atomic" + + "sync" +) + +// StartSync is util to make schedule start goroutine safe. +// See doAtSchedule as example. +type StartSync struct { + started atomic.Bool + startOnce sync.Once +} + +func (s *StartSync) IsStarted() bool { + return s.started.Load() +} + +func (s *StartSync) MarkStarted() { + if s.started.Swap(true) { + panic("schedule is already started") + } +} diff --git a/core/schedule/unlilmited.go b/core/schedule/unlilmited.go new file mode 100644 index 000000000..bd9ee8913 --- /dev/null +++ b/core/schedule/unlilmited.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
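Stepping back to the line schedule cases above: the expected token counts follow directly from the formula documented in line.go, n = (a*x^2)/2 + b*x with a = (to - from)/seconds and b = from. For the "non zero start" case (From=2, To=8 over 2 seconds) that gives a = 3, b = 2, so n = 3*4/2 + 2*2 = 10, which is exactly what the test asserts. The same computation in code, mirroring NewLine:

```go
package main

import "fmt"

func main() {
	from, to, seconds := 2.0, 8.0, 2.0
	a := (to - from) / seconds // ramp slope in RPS per second
	b := from
	n := int64(a*seconds*seconds/2 + b*seconds)
	fmt.Println(n) // 10
}
```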
+// Author: Vladimir Skipor + +package schedule + +import ( + "time" + + "github.com/yandex/pandora/core" +) + +// NewUnlimited returns schedule that generates unlimited ops for passed duration. +func NewUnlimited(duration time.Duration) core.Schedule { + return &unlimitedSchedule{duration: duration} +} + +type UnlimitedConfig struct { + Duration time.Duration `validate:"min-time=1ms"` +} + +func NewUnlimitedConf(conf UnlimitedConfig) core.Schedule { + return NewUnlimited(conf.Duration) +} + +type unlimitedSchedule struct { + duration time.Duration + + StartSync + finish time.Time +} + +func (s *unlimitedSchedule) Start(startAt time.Time) { + s.MarkStarted() + s.startOnce.Do(func() { + s.finish = startAt.Add(s.duration) + }) +} + +func (s *unlimitedSchedule) Next() (tx time.Time, ok bool) { + s.startOnce.Do(func() { + s.MarkStarted() + s.finish = time.Now().Add(s.duration) + }) + now := time.Now() + if now.Before(s.finish) { + return now, true + } + return s.finish, false +} + +func (s *unlimitedSchedule) Left() int { + if !s.IsStarted() || time.Now().Before(s.finish) { + return -1 + } + return 0 +} diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..4e1471648 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = Pandora +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/advanced.rst b/docs/advanced.rst new file mode 100644 index 000000000..03d2d0a52 --- /dev/null +++ b/docs/advanced.rst @@ -0,0 +1,126 @@ +Ammo providers +============== + +Ammo provider is a source of test data: it makes ammo object. + +There is a common rule for any (built-in) provider: data supplied by ammo provider are records that will be pushed via established connection to external host (defined in pandora config via `pool.gun.target` option). Thus, you cannot define in the ammofile to which `physical` host your ammo will be sent. + + +http/json +--------- + +jsonline format, 1 row — 1 json-encoded ammo. + +Pay attention to special header `Host` defined ``outside`` of Headers dictionary. + +`Host` inside Headers section will be silently ignored. + +Ammofile sample: +:: + + {"uri": "/", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora"}, "host": "example.com"} + +Config sample: + +.. code-block:: yaml + + pools: + - ammo: + type: http/json # ammo format + file: ./ammofile # ammo file path + + +raw (request-style) +------------------- + +Raw HTTP request format. If you like to use `telnet` firing HTTP requests, you'll love this. +Also known as phantom's `request-style`. + +File contains size-prefixed HTTP requests. Each ammo is prefixed with a header line (delimited with \n), which consists of +two fields delimited by a space: ammo size and tag. Ammo size is in bytes (integer, including special characters like CR, LF). +Tag is a string. 
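For intuition, a record in this format can be consumed by reading the header line and then exactly `ammo size` bytes. The Go sketch below is illustrative only (`readRecord` is not Pandora's actual decoder); the raw bytes it returns can then be parsed with `http.ReadRequest`:

.. code-block:: go

    package rawammo

    import (
        "bufio"
        "io"
        "strconv"
        "strings"
    )

    // readRecord reads one size-prefixed record: a "size tag" header line,
    // then exactly size bytes of the raw HTTP request. Sketch only.
    func readRecord(r *bufio.Reader) (tag string, raw []byte, err error) {
        header := ""
        for strings.TrimSpace(header) == "" { // skip blank separator lines
            header, err = r.ReadString('\n')
            if err != nil {
                return "", nil, err
            }
        }
        fields := strings.SplitN(strings.TrimSpace(header), " ", 2)
        size, err := strconv.Atoi(fields[0])
        if err != nil {
            return "", nil, err
        }
        if len(fields) == 2 {
            tag = fields[1]
        }
        raw = make([]byte, size)
        _, err = io.ReadFull(r, raw)
        return tag, raw, err
    }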
+You can read about this format (with detailed instructions) at +`Yandex.Tank documentation `_ + +Ammofile sample: +:: + + 73 good + GET / HTTP/1.0 + Host: xxx.tanks.example.com + User-Agent: xxx (shell 1) + + 77 bad + GET /abra HTTP/1.0 + Host: xxx.tanks.example.com + User-Agent: xxx (shell 1) + + 78 unknown + GET /ab ra HTTP/1.0 + Host: xxx.tanks.example.com + User-Agent: xxx (shell 1) + + +Config sample: + +.. code-block:: yaml + + pools: + - ammo: + type: raw # ammo format + file: ./ammofile # ammo file path + +You can redefine any headers using special config option `headers`. Format: list of strings. + +Example: + +.. code-block:: yaml + + pools: + - ammo: + type: raw # ammo format + file: ./ammofile # ammo file path + headers: + - "[Host: yourhost.tld]" + - "[User-Agent: some user agent]" + +uri-style +--------- + +List of URIs and headers + +Ammofile sample: +:: + + [Connection: close] + [Host: your.host.tld] + [Cookie: None] + /?drg tag1 + / + /buy tag2 + [Cookie: test] + /buy/?rt=0&station_to=7&station_from=9 + +Config sample: + +.. code-block:: yaml + + pools: + - ammo: + type: uri # ammo format + file: ./ammofile # ammo file path + + +You can redefine any headers using special config option `headers`. Format: list of strings. + +Example: + +.. code-block:: yaml + + pools: + - ammo: + type: uri # ammo format + file: ./ammofile # ammo file path + headers: + - "[Host: yourhost.tld]" + - "[User-Agent: some user agent]" diff --git a/docs/architecture.graphml b/docs/architecture.graphml index 96ab61d17..702ff3afa 100644 --- a/docs/architecture.graphml +++ b/docs/architecture.graphml @@ -1,6 +1,6 @@ - + @@ -20,8 +20,8 @@ - Ammo -Decoder + Ammo +Provider @@ -38,26 +38,8 @@ Decoder - ammo -file - - - - - - - - - - - - - - - - - Ammo -Provider + ammo +source @@ -68,17 +50,16 @@ Provider - + - - + - UserPool + Instances Pool @@ -97,14 +78,14 @@ Provider - - + + - User + Instance @@ -115,7 +96,7 @@ Provider - + @@ -132,13 +113,13 @@ Provider - + - + - UserLimiter + RPS Scheduler @@ -149,13 +130,13 @@ Provider - + - User + Instance @@ -166,7 +147,7 @@ Provider - + @@ -183,13 +164,13 @@ Provider - + - + - UserLimiter + RPS Scheduler @@ -202,14 +183,14 @@ Provider - + - Startup -Limiter + Startup +Scheduler @@ -220,15 +201,13 @@ Limiter - - + - + - Result -Listener + Aggregator @@ -239,14 +218,14 @@ Listener - + - Ammo -Decoder + Ammo +Provider @@ -257,32 +236,14 @@ Decoder - + - ammo -file - - - - - - - - - - - - - - - - - Ammo -Provider + ammo +source @@ -293,9 +254,8 @@ Provider - + - @@ -303,7 +263,7 @@ Provider - UserPool + Instances Pool @@ -322,14 +282,14 @@ Provider - - + + - User + Instance @@ -340,7 +300,7 @@ Provider - + @@ -357,13 +317,13 @@ Provider - + - + - UserLimiter + RPS Scheduler @@ -374,13 +334,13 @@ Provider - + - User + Instance @@ -391,7 +351,7 @@ Provider - + @@ -408,13 +368,13 @@ Provider - + - + - UserLimiter + RPS Scheduler @@ -427,14 +387,14 @@ Provider - + - Startup -Limiter + Startup +Scheduler @@ -451,8 +411,8 @@ Limiter - json -docs + plain +data @@ -464,17 +424,7 @@ docs - - - - - - - - - - - + @@ -484,8 +434,7 @@ docs - - + @@ -495,8 +444,7 @@ docs - - + @@ -515,7 +463,7 @@ object - + @@ -525,7 +473,7 @@ object - + @@ -535,8 +483,7 @@ object - - + @@ -555,8 +502,7 @@ object - - + @@ -566,14 +512,14 @@ object - + - json -docs + plain +data @@ -585,17 +531,7 @@ docs - - - - - - - - - - - + @@ -605,8 +541,7 @@ docs - - + @@ -616,8 +551,7 @@ docs - - + @@ -636,7 +570,7 @@ object - + @@ -646,7 +580,7 @@ object - + @@ -656,8 +590,7 @@ object - - + @@ -676,8 +609,7 
@@ object - - + @@ -687,8 +619,7 @@ object - - + @@ -698,8 +629,7 @@ object - - + @@ -709,8 +639,7 @@ object - - + @@ -720,14 +649,13 @@ object - - + - Sample + Sample diff --git a/docs/architecture.png b/docs/architecture.png index 6e786e20e..965f1eb3a 100644 Binary files a/docs/architecture.png and b/docs/architecture.png differ diff --git a/docs/architecture.rst b/docs/architecture.rst new file mode 100644 index 000000000..8a1fcf479 --- /dev/null +++ b/docs/architecture.rst @@ -0,0 +1,62 @@ +Architectural overview +====================== + +Architectural scheme +-------------------- + +You can download architectural scheme source `here `_. Open it with `YeD `_ editor. + +.. image:: architecture.png + :align: center + :alt: architectural scheme + +Pandora is a set of components talking to each other through Go channels. There are different types of components. + +Component types +--------------- + +Ammo Provider ++++++++++++++ + +Ammo Provider knows how to make an ammo object from an ammo file or other external resource. Instances get ammo objects +from Ammo Provider. + +Instances Pool +++++++++++++++ + +Instances Pool manages the creation of Instances. You can think of one Instance as a single user that sends requests to +a server sequentially. All Instances from one Instances Pool get their ammo from one Ammo Provider. Instances creation +times are controlled by Startup Scheduler. All Instances from one Instances Pool also have Guns of the same type. + +Scheduler ++++++++++ + +Scheduler controls other events' times by pushing messages to its underlying channel according to the Schedule. +It can control Instances startup times, RPS amount (requests per second) or other processes. + +By combining two types of Schedulers, RPS Scheduler and Instance Startup Scheduler, you can simulate different types of load. +Instace Startup Scheduler controls the level of parallelism and RPS Scheduler controls throughput. + +RPS Scheduler can limit throughput of a whole instances pool, i.e. 10 RPS on 10 instances means 10 RPS overall, or +limit throughput of each instance in a pool individually, i.e. 10 RPS on each of 10 instances means 100 RPS overall. + +If you set RPS Scheduler to 'unlimited' and then gradually raise the number of Instances in your system by using Instance +Startup Scheduler, you'll be able to study the `scalability `_ +of your service. + +If you set Instances count to a big, unchanged value (you can estimate the needed amount by using +`Little's Law `_) and then gradually raise the RPS by using RPS Scheduler, +you'll be able to simulate Internet and push your service to its limits. + +You can also combine two methods mentioned above. + +Instances and Guns +++++++++++++++++++ + +Instances takes an ammo, waits for a Scheduler tick and then shoots with a Gun it has. Gun is a tool that sends +a request to your service and measures the parameters (time, error codes, etc.) of the response. + +Aggregator +++++++++++ + +Aggregator collects measured samples and saves them somewhere. \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..4e94b9f06 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. 
For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/stable/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'Pandora' +copyright = '2018, Alexey Lavrenuke, Vladimir Skipor' +author = 'Alexey Lavrenuke, Vladimir Skipor' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Pandoradoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). 
+ # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'Pandora.tex', 'Pandora Documentation', + 'Alexey Lavrenuke, Vladimir Skipor', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'pandora', 'Pandora Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'Pandora', 'Pandora Documentation', + author, 'Pandora', 'One line description of project.', + 'Miscellaneous'), +] \ No newline at end of file diff --git a/docs/custom.rst b/docs/custom.rst new file mode 100644 index 000000000..c72c1a537 --- /dev/null +++ b/docs/custom.rst @@ -0,0 +1,389 @@ +Custom guns +=========== + + +Basic tutorial +-------------- +You can create you own Golang-based gun with `pandora`. + +There is an example of custom gun shooting via gRPC. + +We create a new gun and define ```shoot``` method for it w/ our test logic. + +You can find examples of custom guns' code below: + - `gRPC <#gRPC>`_ + - `Websockets <#Websockets>`_ + +Now it's time to compile our gun. Install deps and compile your custom gun file (```go build my_custom_gun.go```). + +After that step you'll get ```my_custom_gun``` binary file, it is compiled pandora with your custom gun inside. + +Now its time to create ```load.yaml```: + +.. code-block:: yaml + + pools: + - id: HTTP pool + gun: + type: my_custom_gun_name # custom gun name (specified at `register.Gun("my_custom_gun_name", ...`) + target: "your_grpc_host:your_grpc_port" + ammo: + type: custom_provider + source: + type: file + path: ./json.ammo + result: + type: phout + destination: ./phout.log + rps: {duration: 30s, type: line, from: 1, to: 2} + startup: + type: once + times: 10 + log: + level: error + +And create ammofile ```./json.ammo```: + +.. code-block:: yaml + + {"tag": "/MyCase1", "Param1": "146837693,146837692,146837691"} + {"tag": "/MyCase2", "Param2": "555", "Param1": "500002"} + + +We are ready to shoot. Try it. + + +gRPC +---- + +.. 
code-block:: go

+    // create a package
+    package main
+
+    // import some pandora stuff
+    // and stuff you need for your scenario
+    // and protobuf contracts for your grpc service
+
+    import (
+        "log"
+        "context"
+        "strconv"
+        "strings"
+        "time"
+
+        "github.com/golang/protobuf/ptypes/timestamp"
+        "github.com/satori/go.uuid"
+        "github.com/spf13/afero"
+        "google.golang.org/grpc"
+        pb "my_package/my_protobuf_contracts"
+
+        "github.com/yandex/pandora/cli"
+        "github.com/yandex/pandora/components/phttp/import"
+        "github.com/yandex/pandora/core"
+        "github.com/yandex/pandora/core/aggregator/netsample"
+        "github.com/yandex/pandora/core/import"
+        "github.com/yandex/pandora/core/register"
+    )
+
+    type Ammo struct {
+        Tag    string
+        Param1 string
+        Param2 string
+        Param3 string
+    }
+
+    type Sample struct {
+        URL              string
+        ShootTimeSeconds float64
+    }
+
+    type GunConfig struct {
+        Target string `validate:"required"` // Configuration will fail, without target defined
+    }
+
+    type Gun struct {
+        // Configured on construction.
+        client grpc.ClientConn
+        conf   GunConfig
+        // Configured on Bind, before shooting
+        aggr core.Aggregator // May be your custom Aggregator.
+        core.GunDeps
+    }
+
+    func NewGun(conf GunConfig) *Gun {
+        return &Gun{conf: conf}
+    }
+
+    func (g *Gun) Bind(aggr core.Aggregator, deps core.GunDeps) error {
+        // create gRPC stub at gun initialization
+        conn, err := grpc.Dial(
+            g.conf.Target,
+            grpc.WithInsecure(),
+            grpc.WithTimeout(time.Second),
+            grpc.WithUserAgent("load test, pandora custom shooter"))
+        if err != nil {
+            log.Fatalf("FATAL: %s", err)
+        }
+        g.client = *conn
+        g.aggr = aggr
+        g.GunDeps = deps
+        return nil
+    }
+
+    func (g *Gun) Shoot(ammo core.Ammo) {
+        customAmmo := ammo.(*Ammo)
+        g.shoot(customAmmo)
+    }
+
+    func (g *Gun) case1_method(client pb.MyClient, ammo *Ammo) int {
+        code := 0
+        // prepare list of ids from ammo
+        var itemIDs []int64
+        for _, id := range strings.Split(ammo.Param1, ",") {
+            if id == "" {
+                continue
+            }
+            itemID, err := strconv.ParseInt(id, 10, 64)
+            if err != nil {
+                log.Printf("Ammo parse FATAL: %s", err)
+                code = 314
+            }
+            itemIDs = append(itemIDs, itemID)
+        }
+
+        out, err := client.GetSomeData(
+            context.TODO(), &pb.ItemsRequest{
+                itemIDs})
+
+        if err != nil {
+            log.Printf("FATAL: %s", err)
+            code = 500
+        }
+
+        if out != nil {
+            code = 200
+        }
+        return code
+    }
+
+    func (g *Gun) case2_method(client pb.MyClient, ammo *Ammo) int {
+        code := 0
+        // prepare item_id and warehouse_id
+        item_id, err := strconv.ParseInt(ammo.Param1, 10, 0)
+        if err != nil {
+            log.Printf("Failed to parse ammo FATAL: %s", err)
+            code = 314
+        }
+        warehouse_id, err2 := strconv.ParseInt(ammo.Param2, 10, 0)
+        if err2 != nil {
+            log.Printf("Failed to parse ammo FATAL: %s", err2)
+            code = 314
+        }
+
+        items := []*pb.SomeItem{}
+        items = append(items, &pb.SomeItem{
+            item_id,
+            warehouse_id,
+            1,
+            &timestamp.Timestamp{time.Now().Unix(), 111},
+        })
+
+        out2, err3 := client.GetSomeDataSecond(
+            context.TODO(), &pb.SomeRequest{
+                uuid.Must(uuid.NewV4()).String(),
+                1,
+                items})
+        if err3 != nil {
+            log.Printf("FATAL: %s", err3)
+            code = 316
+        }
+
+        if out2 != nil {
+            code = 200
+        }
+
+        return code
+    }
+
+    func (g *Gun) shoot(ammo *Ammo) {
+        code := 0
+        sample := netsample.Acquire(ammo.Tag)
+
+        conn := g.client
+        client := pb.NewClient(&conn)
+
+        switch ammo.Tag {
+        case "/MyCase1":
+            code = g.case1_method(client, ammo)
+        case "/MyCase2":
+            code = g.case2_method(client, ammo)
+        default:
+            code = 404
+        }
+
+        defer func() {
+            sample.SetProtoCode(code)
+            g.aggr.Report(sample)
+        }()
+    }
+
+    func main() {
//debug.SetGCPercent(-1) + // Standard imports. + fs := afero.NewOsFs() + coreimport.Import(fs) + // May not be imported, if you don't need http guns and etc. + phttp.Import(fs) + + // Custom imports. Integrate your custom types into configuration system. + coreimport.RegisterCustomJSONProvider("custom_provider", func() core.Ammo { return &Ammo{} }) + + register.Gun("my_custom_gun_name", NewGun, func() GunConfig { + return GunConfig{ + Target: "default target", + } + }) + + cli.Run() + } + + +Websockets +---------- + +.. code-block:: go + + package main + + import ( + "bytes" + "encoding/json" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/gorilla/websocket" + "github.com/spf13/afero" + + "github.com/yandex/pandora/cli" + "github.com/yandex/pandora/components/phttp/import" + "github.com/yandex/pandora/core" + "github.com/yandex/pandora/core/aggregator/netsample" + "github.com/yandex/pandora/core/import" + "github.com/yandex/pandora/core/register" + ) + + type Ammo struct { + Tag string + } + + type Sample struct { + URL string + ShootTimeSeconds float64 + } + + type GunConfig struct { + Target string `validate:"required"` + Handler string `validate:"required"`// Configuration will fail, without target defined + } + + type Gun struct { + // Configured on construction. + client websocket.Conn + conf GunConfig + // Configured on Bind, before shooting + aggr core.Aggregator // May be your custom Aggregator. + core.GunDeps + } + + func NewGun(conf GunConfig) *Gun { + return &Gun{conf: conf} + } + + func (g *Gun) Bind(aggr core.Aggregator, deps core.GunDeps) error { + targetPath := url.URL{Scheme: "ws", Host: g.conf.Target, Path: g.conf.Handler} + sample := netsample.Acquire("connection") + code := 0 + rand.Seed(time.Now().Unix()) + conn, _, err := websocket.DefaultDialer.Dial( + targetPath.String(), + nil, + ) + if err != nil { + log.Fatalf("dial err FATAL %s:", err) + code = 500 + } else { + code = 200 + } + g.client = *conn + g.aggr = aggr + g.GunDeps = deps + defer func() { + sample.SetProtoCode(code) + g.aggr.Report(sample) + }() + + go func() { + for { + _, message, err := conn.ReadMessage() + if err != nil { + log.Println("read:", err) + code = 400 + return + } + log.Printf("recv: %s", message) + } + }() + + err = conn.WriteMessage(websocket.TextMessage, []byte("some websocket connection initialization text, e.g. token")) + if err != nil { + log.Println("write:", err) + } + return nil + } + + func (g *Gun) Shoot(ammo core.Ammo) { + sample := netsample.Acquire("message") + code := 0 + conn := g.client + err := conn.WriteMessage(websocket.TextMessage, []byte("test_message")) + if err != nil { + log.Println("connection closed", err) + code = 600 + } else { + code = 200 + } + defer func() { + sample.SetProtoCode(code) + g.aggr.Report(sample) + }() + + } + + func main() { + //debug.SetGCPercent(-1) + // Standard imports. + fs := afero.NewOsFs() + coreimport.Import(fs) + // May not be imported, if you don't need http guns and etc. + phttp.Import(fs) + + // Custom imports. Integrate your custom types into configuration system. 
+ coreimport.RegisterCustomJSONProvider("ammo_provider", func() core.Ammo { return &Ammo{} }) + + register.Gun("my_custom_gun_name", NewGun, func() GunConfig { + return GunConfig{ + Target: "default target", + } + }) + + cli.Run() + } diff --git a/docs/guns.rst b/docs/guns.rst new file mode 100644 index 000000000..e69de29bb diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 000000000..ccb6a6d76 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,21 @@ +.. Pandora documentation master file, created by + sphinx-quickstart on Tue Apr 3 12:01:44 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Pandora's documentation! +=================================== + +Pandora is a high-performance load generator in Go language. It has built-in HTTP(S) and HTTP/2 support and you can write your own load scenarios in Go, compiling them just before your test. + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + install + tutorial + advanced + guns + custom + performance + architecture diff --git a/docs/install.rst b/docs/install.rst new file mode 100644 index 000000000..3adeec919 --- /dev/null +++ b/docs/install.rst @@ -0,0 +1,21 @@ +Installation +============ + +`Download `_ binary release or build from source. + +We use go 1.11 modules. +If you build pandora inside $GOPATH, please make sure you have env variable `GO111MODULE` set to `on`. + +.. code-block:: bash + + git clone https://github.com/yandex/pandora.git + make deps + go install + + + +You can also cross-compile for other arch/os: + +.. code-block:: bash + + GOOS=linux GOARCH=amd64 go build diff --git a/docs/performance.rst b/docs/performance.rst new file mode 100644 index 000000000..294dba056 --- /dev/null +++ b/docs/performance.rst @@ -0,0 +1,107 @@ +Pandora's performance +===================== + +`Alexander Ivanov`_ made some performance tests for the gun itself. Here are the results. + +* Server: NGinx, 32 cores, 64G RAM. +* Tank: 32 cores, 128G RAM. +* Network: 1G. + +HTTP requests to nginx +---------------------- + +Static pages with different sizes. Server delays implemented in Lua script, we can +set delay time using ``sleep`` query parameter: + +.. code-block:: lua + + server { + listen 12999 default; + listen [::]:12999 default ipv6only=on; + server_name pandora.test.yandex.net; + + location ~* / { + + rewrite_by_lua_block { + local args = ngx.req.get_uri_args() + if args['sleep'] then + ngx.sleep(args['sleep']/1000) + end; + } + + root /etc/nginx/pandora; + error_page 404 = 404; + + } + + access_log off; + error_log off; + } + + +* **Connection: Close** 23k RPS + +.. image:: screenshot/http_connection_close_td.png + :align: center + :alt: Connection:Close, response times distribution + +* **Connection: Keep-Alive** 95k RPS + +.. image:: screenshot/http_keep_alive_td.png + :align: center + :alt: Keep-Alive, response times distribution + +* **Response size 10kB** maxed out network interface. OK. +* **Response size 100kb** maxed out network interface. OK. +* **POST requests 10kB** maxed out network interface. OK. +* **POST requests 100kB** maxed out network interface. OK. +* **POST requests 1MB** maxed out network interface. OK. + +.. image:: screenshot/http_100kb_net.png + :align: center + :alt: 100 kb responses, network load + + +* **50ms server delay** 30k RPS. OK. +* **500ms server delay** 30k RPS, 30k instances. OK. +* **1s server delay** 50k RPS, 50k instances. OK. +* **10s server delay** 5k RPS, 5k instances. OK. 
+
+**All good.**
+
+.. image:: screenshot/http_delay_10s_td.png
+   :align: center
+   :alt: 10s server delay, response times distribution
+
+.. image:: screenshot/http_delay_10s_instances.png
+   :align: center
+   :alt: 10s server delay, instances count
+
+
+* **Server fail during test** OK.
+
+.. image:: screenshot/http_srv_fail_q.png
+   :align: center
+   :alt: server fail emulation, response times quantiles
+
+
+Custom scenarios
+----------------
+
+The performance of custom scenarios depends very much on their implementation. In some of our
+tests we saw spikes caused by GC; they can be avoided by reducing allocation sizes.
+It is a good idea to optimize your scenarios.
+Go has `a lot `_ of tools to help you
+do this.
+
+.. note:: We used JSON-formatted ammo to specify parameters for each scenario run.
+
+* **Small requests** 35k RPS. OK.
+* **Some scenario steps with big JSON bodies** 35k RPS. OK.
+
+.. image:: screenshot/scn_cases.png
+   :align: center
+   :alt: scenario steps
+
+
+.. _Alexander Ivanov: ival.net@yandex.ru
\ No newline at end of file
diff --git a/docs/screenshot/http_100kb_net.png b/docs/screenshot/http_100kb_net.png
new file mode 100644
index 000000000..00fd85071
Binary files /dev/null and b/docs/screenshot/http_100kb_net.png differ
diff --git a/docs/screenshot/http_connection_close_td.png b/docs/screenshot/http_connection_close_td.png
new file mode 100644
index 000000000..0a83e90e2
Binary files /dev/null and b/docs/screenshot/http_connection_close_td.png differ
diff --git a/docs/screenshot/http_delay_10s_instances.png b/docs/screenshot/http_delay_10s_instances.png
new file mode 100644
index 000000000..82fd44e05
Binary files /dev/null and b/docs/screenshot/http_delay_10s_instances.png differ
diff --git a/docs/screenshot/http_delay_10s_td.png b/docs/screenshot/http_delay_10s_td.png
new file mode 100644
index 000000000..895a5d0c1
Binary files /dev/null and b/docs/screenshot/http_delay_10s_td.png differ
diff --git a/docs/screenshot/http_keep_alive_td.png b/docs/screenshot/http_keep_alive_td.png
new file mode 100644
index 000000000..bece8e01f
Binary files /dev/null and b/docs/screenshot/http_keep_alive_td.png differ
diff --git a/docs/screenshot/http_srv_fail_q.png b/docs/screenshot/http_srv_fail_q.png
new file mode 100644
index 000000000..1cf83ebc7
Binary files /dev/null and b/docs/screenshot/http_srv_fail_q.png differ
diff --git a/docs/screenshot/scn_cases.png b/docs/screenshot/scn_cases.png
new file mode 100644
index 000000000..06004351e
Binary files /dev/null and b/docs/screenshot/scn_cases.png differ
diff --git a/docs/tutorial.rst b/docs/tutorial.rst
new file mode 100644
index 000000000..e5db5e0b0
--- /dev/null
+++ b/docs/tutorial.rst
@@ -0,0 +1,81 @@
+Your first test
+===============
+
+You can use Pandora alone or together with `Yandex.Tank`_ as a test runner and
+`Overload`_ as a result viewer. In the second case Pandora's configuration is the same, but you will embed it into Yandex.Tank's config.
+
+Config file
+-----------
+
+Pandora supports config files in `YAML`_ format. Create a new file named ``load.yaml`` and add the following lines in your favourite editor:
+
+..
code-block:: yaml + + pools: + - id: HTTP pool # pool name (for your choice) + gun: + type: http # gun type + target: example.com:80 # gun target + ammo: + type: uri # ammo format + file: ./ammo.uri # ammo File + result: + type: phout # report format (phout is compatible with Yandex.Tank) + destination: ./phout.log # report file name + + rps: # shooting schedule + type: line # linear growth + from: 1 # from 1 response per second + to: 5 # to 5 responses per second + duration: 60s # for 60 seconds + + startup: # instances startup schedule + type: once # start 10 instances + times: 10 + + +You can enable debug information about gun (e.g. monitoring and additional logging). + +.. code-block:: yaml + + log: # gun logging configuration + level: error # log only `error` messages (`debug` for verbose logging) + + monitoring: + expvar: # gun statistics HTTP server + enabled: true + port: 1234 + cpuprofile: # cpu profiling + enabled: true + file: "cpuprofile.log" + memprofile: # mem profiling + enabled: true + memprofile: "memprofile.log" + + +`ammo.uri`: + +:: + + /my/first/url + /my/second/url + +Run your tests: + + +.. code-block:: bash + + pandora load.yaml + + +The results are in ``phout.log``. Use `Yandex.Tank`_ +and `Overload`_ to plot them. + +References +---------- + +.. target-notes:: + +.. _`Overload`: https://overload.yandex.net +.. _`Yandex.Tank`: http://yandextank.readthedocs.org/en/latest/configuration.html#pandora +.. _`YAML`: https://en.wikipedia.org/wiki/YAML \ No newline at end of file diff --git a/engine/engine.go b/engine/engine.go deleted file mode 100644 index 71603e397..000000000 --- a/engine/engine.go +++ /dev/null @@ -1,45 +0,0 @@ -package engine - -import ( - "log" - - "golang.org/x/net/context" - - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" -) - -type Engine struct { - cfg config.Global -} - -func New(cfg config.Global) *Engine { - return &Engine{ - cfg: cfg, - } -} - -func (e *Engine) Serve(ctx context.Context) error { - pools := make([]*UserPool, 0, len(e.cfg.Pools)) - for _, upc := range e.cfg.Pools { - up, err := NewUserPoolFromConfig(&upc) - if err != nil { - log.Printf("Could not create user pool: %s", err) - continue - } - pools = append(pools, up) - } - promises := utils.Promises{} - for _, up := range pools { - promises = append(promises, utils.PromiseCtx(ctx, up.Start)) - } - select { - case err := <-promises.All(): - if err != nil { - return err - } - case <-ctx.Done(): - } - log.Println("Done") - return nil -} diff --git a/engine/expvar.go b/engine/expvar.go deleted file mode 100644 index 4015b582c..000000000 --- a/engine/expvar.go +++ /dev/null @@ -1,73 +0,0 @@ -package engine - -import ( - "expvar" - "log" - "strconv" - "sync/atomic" - "time" -) - -type Counter struct { - i int64 -} - -func (c *Counter) String() string { - return strconv.FormatInt(atomic.LoadInt64(&c.i), 10) -} - -func (c *Counter) Add(delta int64) { - atomic.AddInt64(&c.i, delta) -} - -func (c *Counter) Set(value int64) { - atomic.StoreInt64(&c.i, value) -} - -func (c *Counter) Get() int64 { - return atomic.LoadInt64(&c.i) -} - -func NewCounter(name string) *Counter { - v := &Counter{} - expvar.Publish(name, v) - return v -} - -var ( - evRequests = NewCounter("engine_Requests") - evResponses = NewCounter("engine_Responses") - evUsersStarted = NewCounter("engine_UsersStarted") - evUsersFinished = NewCounter("engine_UsersFinished") -) - -func init() { - evReqPS := NewCounter("engine_ReqPS") - evResPS := NewCounter("engine_ResPS") - evActiveUsers := 
NewCounter("engine_ActiveUsers") - evActiveRequests := NewCounter("engine_ActiveRequests") - requests := evRequests.Get() - responses := evResponses.Get() - go func() { - var requestsNew, responsesNew int64 - for _ = range time.NewTicker(1 * time.Second).C { - requestsNew = evRequests.Get() - responsesNew = evResponses.Get() - rps := responsesNew - responses - reqps := requestsNew - requests - activeUsers := evUsersStarted.Get() - evUsersFinished.Get() - activeRequests := requestsNew - responsesNew - log.Printf( - "[ENGINE] %d resp/s; %d req/s; %d users; %d active\n", - rps, reqps, activeUsers, activeRequests) - - requests = requestsNew - responses = responsesNew - - evActiveUsers.Set(activeUsers) - evActiveRequests.Set(activeRequests) - evReqPS.Set(reqps) - evResPS.Set(rps) - } - }() -} diff --git a/engine/user.go b/engine/user.go deleted file mode 100644 index d2c22bdb5..000000000 --- a/engine/user.go +++ /dev/null @@ -1,165 +0,0 @@ -package engine - -import ( - "fmt" - "log" - - "golang.org/x/net/context" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/gun" - "github.com/yandex/pandora/limiter" - "github.com/yandex/pandora/utils" -) - -type User struct { - Name string - Ammunition ammo.Provider - Results aggregate.ResultListener - Limiter limiter.Limiter - Gun gun.Gun -} - -func (u *User) Run(ctx context.Context) error { - //log.Printf("Starting user: %s\n", u.Name) - evUsersStarted.Add(1) - defer func() { - //log.Printf("Exit user: %s\n", u.Name) - evUsersFinished.Add(1) - }() - control := u.Limiter.Control() - source := u.Ammunition.Source() - u.Gun.BindResultsTo(u.Results.Sink()) -loop: - for { - select { - case ammo, more := <-source: - if !more { - log.Println("Ammo ended") - break loop - } - _, more = <-control - if more { - evRequests.Add(1) - u.Gun.Shoot(ctx, ammo) - evResponses.Add(1) - u.Ammunition.Release(ammo) - } else { - //log.Println("Limiter ended.") - break loop - } - case <-ctx.Done(): - break loop - } - } - return nil -} - -type UserPool struct { - name string - userLimiterConfig *config.Limiter - gunConfig *config.Gun - ammunition ammo.Provider - results aggregate.ResultListener - startupLimiter limiter.Limiter - sharedSchedule bool - users []*User - done chan bool -} - -func NewUserPoolFromConfig(cfg *config.UserPool) (up *UserPool, err error) { - if cfg == nil { - return nil, fmt.Errorf("no pool config provided") - } - - ammunition, err := GetAmmoProvider(cfg.AmmoProvider) - if err != nil { - return nil, err - } - results, err := GetResultListener(cfg.ResultListener) - if err != nil { - return nil, err - } - startupLimiter, err := GetLimiter(cfg.StartupLimiter) - if err != nil { - return nil, err - } - up = &UserPool{ - name: cfg.Name, - ammunition: ammunition, - results: results, - startupLimiter: startupLimiter, - gunConfig: cfg.Gun, - userLimiterConfig: cfg.UserLimiter, - sharedSchedule: cfg.SharedSchedule, - } - return -} - -func (up *UserPool) Start(ctx context.Context) error { - // userCtx will be canceled when all users finished their execution - - utilCtx, utilCancel := context.WithCancel(ctx) - - userPromises := utils.Promises{} - utilsPromises := utils.Promises{ - utils.PromiseCtx(utilCtx, up.ammunition.Start), - utils.PromiseCtx(utilCtx, up.results.Start), - utils.PromiseCtx(utilCtx, up.startupLimiter.Start), - } - var sharedLimiter limiter.Limiter - - if up.sharedSchedule { - var err error - sharedLimiter, err = GetLimiter(up.userLimiterConfig) - if err != nil 
{ - return fmt.Errorf("could not make a user limiter from config due to %s", err) - } - // Starting shared limiter. - // This may cause spike load in the beginning of a test if it takes time - // to initialize a user, because we don't wait for them to initialize in - // case of shared limiter and there might be some ticks accumulated - utilsPromises = append(utilsPromises, utils.PromiseCtx(utilCtx, sharedLimiter.Start)) - } - - for range up.startupLimiter.Control() { - var l limiter.Limiter - if up.sharedSchedule { - l = sharedLimiter - } else { - var err error - l, err = GetLimiter(up.userLimiterConfig) - if err != nil { - return fmt.Errorf("could not make a user limiter from config due to %s", err) - } - } - g, err := GetGun(up.gunConfig) - if err != nil { - return fmt.Errorf("could not make a gun from config due to %s", err) - } - u := &User{ - Name: up.name, - Ammunition: up.ammunition, - Results: up.results, - Limiter: l, - Gun: g, - } - if !up.sharedSchedule { - utilsPromises = append(utilsPromises, utils.PromiseCtx(utilCtx, l.Start)) - } - userPromises = append(userPromises, utils.PromiseCtx(ctx, u.Run)) - } - // FIXME: wrong logic here - log.Println("Started all users. Waiting for them") - err := <-userPromises.All() - log.Println("Stop utils") - utilCancel() // stop result listener when all users finished - - err2 := <-utilsPromises.All() - if err2 != nil { - fmt.Printf("Error waiting utils promises: %s", err2.Error()) - } - return err -} diff --git a/engine/utils.go b/engine/utils.go deleted file mode 100644 index 8de1dbed6..000000000 --- a/engine/utils.go +++ /dev/null @@ -1,56 +0,0 @@ -package engine - -import ( - "fmt" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/extpoints" - "github.com/yandex/pandora/gun" - "github.com/yandex/pandora/limiter" -) - -func GetAmmoProvider(c *config.AmmoProvider) (ammo.Provider, error) { - if c == nil { - return nil, nil - } - provider := extpoints.AmmoProviders.Lookup(c.AmmoType) - if provider == nil { - return nil, fmt.Errorf("Provider for ammo '%s' was not found", c.AmmoType) - } - return provider(c) -} - -func GetResultListener(c *config.ResultListener) (aggregate.ResultListener, error) { - if c == nil { - return nil, nil - } - listener := extpoints.ResultListeners.Lookup(c.ListenerType) - if listener == nil { - return nil, fmt.Errorf("Result listener '%s' was not found", c.ListenerType) - } - return listener(c) -} - -func GetLimiter(c *config.Limiter) (limiter.Limiter, error) { - if c == nil { - return nil, nil - } - l := extpoints.Limiters.Lookup(c.LimiterType) - if l == nil { - return nil, fmt.Errorf("Limiter type '%s' was not found", c.LimiterType) - } - return l(c) -} - -func GetGun(c *config.Gun) (gun.Gun, error) { - if c == nil { - return nil, nil - } - l := extpoints.Guns.Lookup(c.GunType) - if l == nil { - return nil, fmt.Errorf("Gun type '%s' was not found", c.GunType) - } - return l(c) -} diff --git a/engine/utils_test.go b/engine/utils_test.go deleted file mode 100644 index c6a7f280a..000000000 --- a/engine/utils_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/yandex/pandora/config" -) - -func TestNotExistentLimiterConfig(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "NOT_EXISTENT", - Parameters: nil, - } - l, err := GetLimiter(lc) - - if err == nil { - t.Errorf("No error on non existent limiter type") - } - if l != nil { - t.Errorf("Returned non-nil limiter for non existent 
limiter type") - } -} - -func TestEmptyLimiterConfig(t *testing.T) { - l, err := GetLimiter(nil) - - if err != nil { - t.Errorf("Error on empty limiter config: %s", err) - } - if l != nil { - t.Errorf("Returned non-nil limiter for empty config") - } -} diff --git a/example/config/example.json b/example/config/example.json deleted file mode 100644 index 24d577e01..000000000 --- a/example/config/example.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "Pools": [ - { - "Name": "Pool#0", - "Gun": { - "GunType": "spdy", - "Parameters": { - "Target": "localhost:3000" - } - }, - "AmmoProvider": { - "AmmoType": "jsonline/spdy", - "AmmoSource": "./example/data/ammo.jsonline", - "Passes": 10 - }, - "ResultListener": { - "ListenerType": "log/simple", - "Destination": "" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 3, - "MaxCount": 9, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 2, - "MaxCount": 5, - "Period": 0.1 - } - } - }, - { - "Name": "Pool#1", - "Gun": { - "GunType": "log" - }, - "AmmoProvider": { - "AmmoType": "dummy/log", - "AmmoSource": "" - }, - "ResultListener": { - "ListenerType": "log/simple", - "Destination": "" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 3, - "MaxCount": 9, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 1, - "MaxCount": 4, - "Period": 0.2 - } - } - } - ] -} \ No newline at end of file diff --git a/example/config/example_phout.json b/example/config/example_phout.json deleted file mode 100644 index d7bc88df0..000000000 --- a/example/config/example_phout.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "Pools": [ - { - "Name": "SPDY pool", - "Gun": { - "GunType": "spdy", - "Parameters": { - "Target": "localhost:3000" - } - }, - "AmmoProvider": { - "AmmoType": "jsonline/spdy", - "AmmoSource": "./example/data/ammo.jsonline" - }, - "ResultListener": { - "ListenerType": "log/simple", - "Destination": "./phout.log" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 1, - "MaxCount": 3, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 1, - "MaxCount": 5, - "Period": 1 - } - } - }, - { - "Name": "HTTP pool", - "Gun": { - "GunType": "http", - "Parameters": { - "Target": "localhost:3000" - } - }, - "AmmoProvider": { - "AmmoType": "jsonline/http", - "AmmoSource": "./example/data/ammo.jsonline" - }, - "ResultListener": { - "ListenerType": "log/phout", - "Destination": "./http_phout.log" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 2, - "MaxCount": 3, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 1, - "MaxCount": 5, - "Period": 0.5 - } - } - }, - { - "Name": "Pool#3", - "Gun": { - "GunType": "spdy", - "Parameters": { - "Target": "localhost:3000" - } - }, - "AmmoProvider": { - "AmmoType": "jsonline/spdy", - "AmmoSource": "./example/data/ammo.jsonline" - }, - "ResultListener": { - "ListenerType": "log/phout", - "Destination": "./phout.log" - }, - "UserLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 3, - "MaxCount": 9, - "Period": 1 - } - }, - "StartupLimiter": { - "LimiterType": "periodic", - "Parameters": { - "BatchSize": 2, - "MaxCount": 5, - "Period": 0.1 - } - } - } - ] -} \ No newline at end of file diff --git a/example/data/ammo.jsonline b/example/data/ammo.jsonline deleted 
file mode 100644 index 47c3d85af..000000000 --- a/example/data/ammo.jsonline +++ /dev/null @@ -1,25 +0,0 @@ -{"uri": "/00", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/01", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/02", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/03", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/04", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/05", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/06", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/07", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/08", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/09", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/10", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/11", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/12", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/13", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/14", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/15", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/16", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/17", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/18", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/19", "method": "GET", "headers": {"Accept": "*/*", 
"Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/20", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/21", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/22", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/23", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/24", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} diff --git a/examples/connect.yaml b/examples/connect.yaml new file mode 100644 index 000000000..0d1c4361b --- /dev/null +++ b/examples/connect.yaml @@ -0,0 +1,23 @@ +pools: + - gun: + type: connect + target: localhost:3000 # some secure proxy address + ssl: true + connect-ssl: false + ammo: + type: http/json + file: ./http.jsonline + result: + type: phout + destination: ./http_phout.log + rps-per-instance: true + rps: + type: line + from: 1 + to: 5 + duration: 2s + startup: + type: once + times: 5 +log: + level: debug diff --git a/examples/custom_pandora/custom.yaml b/examples/custom_pandora/custom.yaml new file mode 100644 index 000000000..ab6b0fd6f --- /dev/null +++ b/examples/custom_pandora/custom.yaml @@ -0,0 +1,27 @@ +pools: +- gun: + type: my-custom-gun-name + target: example.com:80 + + ammo: + type: my-custom-provider-name + source: # You may just write file path here. Or stdin. + type: inline + data: | + {"url": "url1", "queryParam": "query1"} + {"url": "url2", "queryParam": "query2"} + + result: + type: json + sink: stdout # Just for interactivity print result to stdout. Usually file used here. + + rps: + - {duration: 2s, type: line, from: 1, to: 5} + - {duration: 3s, type: const, ops: 5} + - {duration: 2s, type: line, from: 5, to: 1} + startup: + type: once + times: 5 + +log: + level: debug diff --git a/examples/custom_pandora/custom_main.go b/examples/custom_pandora/custom_main.go new file mode 100644 index 000000000..b5304501d --- /dev/null +++ b/examples/custom_pandora/custom_main.go @@ -0,0 +1,89 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package main + +import ( + "math/rand" + "time" + + "github.com/spf13/afero" + "go.uber.org/zap" + + "github.com/yandex/pandora/cli" + phttp "github.com/yandex/pandora/components/phttp/import" + "github.com/yandex/pandora/core" + coreimport "github.com/yandex/pandora/core/import" + "github.com/yandex/pandora/core/register" +) + +type Ammo struct { + URL string + QueryParam string +} + +type Sample struct { + URL string + ShootTimeSeconds float64 +} + +type GunConfig struct { + Target string `validate:"required"` // Configuration will fail, without target defined +} + +type Gun struct { + // Configured on construction. + conf GunConfig + + // Configured on Bind, before shooting. + aggr core.Aggregator // May be your custom Aggregator. 
+ core.GunDeps +} + +func NewGun(conf GunConfig) *Gun { + return &Gun{conf: conf} +} + +func (g *Gun) Bind(aggr core.Aggregator, deps core.GunDeps) error { + g.aggr = aggr + g.GunDeps = deps + return nil +} + +func (g *Gun) Shoot(ammo core.Ammo) { + customAmmo := ammo.(*Ammo) // Shoot will panic on unexpected ammo type. Panic cancels shooting. + g.shoot(customAmmo) +} + +func (g *Gun) shoot(ammo *Ammo) { + start := time.Now() + defer func() { + g.aggr.Report(Sample{ammo.URL, time.Since(start).Seconds()}) + }() + // Put your shoot logic here. + g.Log.Info("Custom shoot", zap.String("target", g.conf.Target), zap.Any("ammo", ammo)) + time.Sleep(time.Duration(rand.Float64() * float64(time.Second))) +} + +func main() { + // Standard imports. + fs := afero.NewOsFs() + coreimport.Import(fs) + + // May not be imported, if you don't need http guns and etc. + phttp.Import(fs) + + // Custom imports. Integrate your custom types into configuration system. + coreimport.RegisterCustomJSONProvider("my-custom-provider-name", func() core.Ammo { return &Ammo{} }) + + register.Gun("my-custom-gun-name", NewGun, func() GunConfig { + return GunConfig{ + Target: "default target", + } + }) + register.Gun("my-custom/no-default", NewGun) + + cli.Run() +} diff --git a/examples/debug_and_profiling.yaml b/examples/debug_and_profiling.yaml new file mode 100644 index 000000000..bae7aa536 --- /dev/null +++ b/examples/debug_and_profiling.yaml @@ -0,0 +1,31 @@ +pools: +- id: HTTP pool + gun: + type: http + target: example.com:80 + dial: + timeout: 1s + ammo: + type: http/json + file: ./examples/http.jsonline + result: + type: phout + destination: ./http_phout.log + rps: + type: line + from: 1 + to: 5 + duration: 2s + startup: + type: once + times: 5 +log: + level: debug +monitoring: + expvar: + enabled: true + port: 1234 + cpuprofile: + enabled: true + memprofile: + enabled: true diff --git a/examples/example.yaml b/examples/example.yaml new file mode 100644 index 000000000..b005c214e --- /dev/null +++ b/examples/example.yaml @@ -0,0 +1,19 @@ +pools: + - id: example + gun: + type: example + ammo: + type: example + limit: 100 + result: + type: log + rps: + type: line + from: 1 + to: 5 + duration: 2s + startup: + type: once + times: 5 +log: + level: debug diff --git a/examples/http.jsonline b/examples/http.jsonline new file mode 100644 index 000000000..fb151fc94 --- /dev/null +++ b/examples/http.jsonline @@ -0,0 +1 @@ +{"uri": "/", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "User-Agent": "Pandora"}, "host": "example.com"} diff --git a/examples/http.yaml b/examples/http.yaml new file mode 100644 index 000000000..46f847304 --- /dev/null +++ b/examples/http.yaml @@ -0,0 +1,31 @@ +pools: + - id: HTTP pool + gun: + type: http + target: example.com:80 + dial: + timeout: 1s + ammo: + type: http/json + file: ./examples/http.jsonline + result: + type: phout + destination: ./http_phout.log + rps: + type: line + from: 1 + to: 5 + duration: 2s + startup: + type: once + times: 5 +log: + level: debug +monitoring: + expvar: + enabled: true + port: 1234 + cpuprofile: + enabled: false + memprofile: + enabled: false diff --git a/extpoints/extpoints.go b/extpoints/extpoints.go deleted file mode 100644 index daafb378d..000000000 --- a/extpoints/extpoints.go +++ /dev/null @@ -1,327 +0,0 @@ -// generated by go-extpoints -- DO NOT EDIT -package extpoints - -import ( - "reflect" - "runtime" - "strings" - "sync" -) - -var extRegistry = ®istryType{m: make(map[string]*extensionPoint)} - -type registryType struct { 
- sync.Mutex - m map[string]*extensionPoint -} - -// Top level registration - -func extensionTypes(extension interface{}) []string { - var ifaces []string - typ := reflect.TypeOf(extension) - for name, ep := range extRegistry.m { - if ep.iface.Kind() == reflect.Func && typ.AssignableTo(ep.iface) { - ifaces = append(ifaces, name) - } - if ep.iface.Kind() != reflect.Func && typ.Implements(ep.iface) { - ifaces = append(ifaces, name) - } - } - return ifaces -} - -func RegisterExtension(extension interface{}, name string) []string { - extRegistry.Lock() - defer extRegistry.Unlock() - var ifaces []string - for _, iface := range extensionTypes(extension) { - if extRegistry.m[iface].register(extension, name) { - ifaces = append(ifaces, iface) - } - } - return ifaces -} - -func UnregisterExtension(name string) []string { - extRegistry.Lock() - defer extRegistry.Unlock() - var ifaces []string - for iface, extpoint := range extRegistry.m { - if extpoint.unregister(name) { - ifaces = append(ifaces, iface) - } - } - return ifaces -} - -// Base extension point - -type extensionPoint struct { - sync.Mutex - iface reflect.Type - extensions map[string]interface{} -} - -func newExtensionPoint(iface interface{}) *extensionPoint { - ep := &extensionPoint{ - iface: reflect.TypeOf(iface).Elem(), - extensions: make(map[string]interface{}), - } - extRegistry.Lock() - extRegistry.m[ep.iface.Name()] = ep - extRegistry.Unlock() - return ep -} - -func (ep *extensionPoint) lookup(name string) interface{} { - ep.Lock() - defer ep.Unlock() - ext, ok := ep.extensions[name] - if !ok { - return nil - } - return ext -} - -func (ep *extensionPoint) all() map[string]interface{} { - ep.Lock() - defer ep.Unlock() - all := make(map[string]interface{}) - for k, v := range ep.extensions { - all[k] = v - } - return all -} - -func (ep *extensionPoint) register(extension interface{}, name string) bool { - ep.Lock() - defer ep.Unlock() - if name == "" { - typ := reflect.TypeOf(extension) - if typ.Kind() == reflect.Func { - nameParts := strings.Split(runtime.FuncForPC( - reflect.ValueOf(extension).Pointer()).Name(), ".") - name = nameParts[len(nameParts)-1] - } else { - name = typ.Elem().Name() - } - } - _, exists := ep.extensions[name] - if exists { - return false - } - ep.extensions[name] = extension - return true -} - -func (ep *extensionPoint) unregister(name string) bool { - ep.Lock() - defer ep.Unlock() - _, exists := ep.extensions[name] - if !exists { - return false - } - delete(ep.extensions, name) - return true -} - -// AmmoProvider - -var AmmoProviders = &ammoProviderExt{ - newExtensionPoint(new(AmmoProvider)), -} - -type ammoProviderExt struct { - *extensionPoint -} - -func (ep *ammoProviderExt) Unregister(name string) bool { - return ep.unregister(name) -} - -func (ep *ammoProviderExt) Register(extension AmmoProvider, name string) bool { - return ep.register(extension, name) -} - -func (ep *ammoProviderExt) Lookup(name string) AmmoProvider { - ext := ep.lookup(name) - if ext == nil { - return nil - } - return ext.(AmmoProvider) -} - -func (ep *ammoProviderExt) Select(names []string) []AmmoProvider { - var selected []AmmoProvider - for _, name := range names { - selected = append(selected, ep.Lookup(name)) - } - return selected -} - -func (ep *ammoProviderExt) All() map[string]AmmoProvider { - all := make(map[string]AmmoProvider) - for k, v := range ep.all() { - all[k] = v.(AmmoProvider) - } - return all -} - -func (ep *ammoProviderExt) Names() []string { - var names []string - for k := range ep.all() { - names = 
append(names, k) - } - return names -} - -// ResultListener - -var ResultListeners = &resultListenerExt{ - newExtensionPoint(new(ResultListener)), -} - -type resultListenerExt struct { - *extensionPoint -} - -func (ep *resultListenerExt) Unregister(name string) bool { - return ep.unregister(name) -} - -func (ep *resultListenerExt) Register(extension ResultListener, name string) bool { - return ep.register(extension, name) -} - -func (ep *resultListenerExt) Lookup(name string) ResultListener { - ext := ep.lookup(name) - if ext == nil { - return nil - } - return ext.(ResultListener) -} - -func (ep *resultListenerExt) Select(names []string) []ResultListener { - var selected []ResultListener - for _, name := range names { - selected = append(selected, ep.Lookup(name)) - } - return selected -} - -func (ep *resultListenerExt) All() map[string]ResultListener { - all := make(map[string]ResultListener) - for k, v := range ep.all() { - all[k] = v.(ResultListener) - } - return all -} - -func (ep *resultListenerExt) Names() []string { - var names []string - for k := range ep.all() { - names = append(names, k) - } - return names -} - -// Limiter - -var Limiters = &limiterExt{ - newExtensionPoint(new(Limiter)), -} - -type limiterExt struct { - *extensionPoint -} - -func (ep *limiterExt) Unregister(name string) bool { - return ep.unregister(name) -} - -func (ep *limiterExt) Register(extension Limiter, name string) bool { - return ep.register(extension, name) -} - -func (ep *limiterExt) Lookup(name string) Limiter { - ext := ep.lookup(name) - if ext == nil { - return nil - } - return ext.(Limiter) -} - -func (ep *limiterExt) Select(names []string) []Limiter { - var selected []Limiter - for _, name := range names { - selected = append(selected, ep.Lookup(name)) - } - return selected -} - -func (ep *limiterExt) All() map[string]Limiter { - all := make(map[string]Limiter) - for k, v := range ep.all() { - all[k] = v.(Limiter) - } - return all -} - -func (ep *limiterExt) Names() []string { - var names []string - for k := range ep.all() { - names = append(names, k) - } - return names -} - -// Gun - -var Guns = &gunExt{ - newExtensionPoint(new(Gun)), -} - -type gunExt struct { - *extensionPoint -} - -func (ep *gunExt) Unregister(name string) bool { - return ep.unregister(name) -} - -func (ep *gunExt) Register(extension Gun, name string) bool { - return ep.register(extension, name) -} - -func (ep *gunExt) Lookup(name string) Gun { - ext := ep.lookup(name) - if ext == nil { - return nil - } - return ext.(Gun) -} - -func (ep *gunExt) Select(names []string) []Gun { - var selected []Gun - for _, name := range names { - selected = append(selected, ep.Lookup(name)) - } - return selected -} - -func (ep *gunExt) All() map[string]Gun { - all := make(map[string]Gun) - for k, v := range ep.all() { - all[k] = v.(Gun) - } - return all -} - -func (ep *gunExt) Names() []string { - var names []string - for k := range ep.all() { - names = append(names, k) - } - return names -} diff --git a/extpoints/interfaces.go b/extpoints/interfaces.go deleted file mode 100644 index 3f8a6161d..000000000 --- a/extpoints/interfaces.go +++ /dev/null @@ -1,17 +0,0 @@ -package extpoints - -import ( - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/gun" - "github.com/yandex/pandora/limiter" -) - -type AmmoProvider func(*config.AmmoProvider) (ammo.Provider, error) - -type ResultListener func(*config.ResultListener) (aggregate.ResultListener, error) - -type 
Limiter func(*config.Limiter) (limiter.Limiter, error) - -type Gun func(*config.Gun) (gun.Gun, error) diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..838b874e3 --- /dev/null +++ b/go.mod @@ -0,0 +1,45 @@ +module github.com/yandex/pandora + +require ( + github.com/BurntSushi/toml v0.3.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20171111151018-521b25f4b05f + github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 + github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 + github.com/fatih/structs v1.0.0 + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/ghodss/yaml v1.0.0 + github.com/go-playground/locales v0.11.2 // indirect + github.com/go-playground/universal-translator v0.16.0 // indirect + github.com/golang/protobuf v1.3.1 // indirect + github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect + github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874 + github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb // indirect + github.com/json-iterator/go v0.0.0-20180214060632-e7c7f3b33712 + github.com/kr/pretty v0.1.0 // indirect + github.com/magiconair/properties v1.7.6 + github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047 + github.com/onsi/ginkgo v1.4.0 + github.com/onsi/gomega v1.3.0 + github.com/pelletier/go-toml v1.1.0 // indirect + github.com/pkg/errors v0.8.0 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 + github.com/spf13/afero v1.0.2 + github.com/spf13/cast v1.2.0 // indirect + github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec // indirect + github.com/spf13/pflag v1.0.0 // indirect + github.com/spf13/viper v1.0.0 + github.com/stretchr/objx v0.1.0 // indirect + github.com/stretchr/testify v1.2.1 + github.com/uber-go/atomic v1.3.0 + go.uber.org/atomic v1.3.1 + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.7.1 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + gopkg.in/bluesuncorp/validator.v9 v9.10.0 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/yaml.v2 v2.0.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..6c128cc18 --- /dev/null +++ b/go.sum @@ -0,0 +1,90 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/asaskevich/govalidator v0.0.0-20171111151018-521b25f4b05f h1:xHxhygLkJBQaXZ7H0JUpmqK/gfKO2DZXB7gAKT6bbBs= +github.com/asaskevich/govalidator v0.0.0-20171111151018-521b25f4b05f/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo= +github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/stackerr 
v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= +github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-playground/locales v0.11.2 h1:wH6Ksuvzk0SU9M6wUeGz/EaRWnavAHCOsFre1njzgi8= +github.com/go-playground/locales v0.11.2/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874 h1:em+tTnzgU7N22woTBMcSJAOW7tRHAkK597W+MD/CpK8= +github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb h1:1OvvPvZkn/yCQ3xBcM8y4020wdkMXPHLB4+NfoGWh4U= +github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/json-iterator/go v0.0.0-20180214060632-e7c7f3b33712 h1:nANBg0vxBeNql2DGe4fxaAGskRFEWtXg5cgWJYuHQ14= +github.com/json-iterator/go v0.0.0-20180214060632-e7c7f3b33712/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.7.6 h1:U+1DqNen04MdEPgFiIwdOUiqZ8qPa37xgogX/sd3+54= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047 h1:zCoDWFD5nrJJVjbXiDZcVhOBSzKn3o9LgRLLMRNuru8= +github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/onsi/ginkgo v1.4.0 h1:n60/4GZK0Sr9O2iuGKq876Aoa0ER2ydgpMOBwzJ8e2c= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.3.0 h1:yPHEatyQC4jN3vdfvqJXG7O9vfC6LhaAV1NEdYpP+h0= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/pelletier/go-toml v1.1.0 h1:cmiOvKzEunMsAxyhXSzpL5Q1CRKpVv0KQsnAIcSEVYM= +github.com/pelletier/go-toml v1.1.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13 h1:AUK/hm/tPsiNNASdb3J8fySVRZoI7fnK5mlOvdFD43o= +github.com/pquerna/ffjson v0.0.0-20171002144729-d49c2bc1aa13/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/spf13/afero v1.0.2 h1:5bRmqmInNmNFkI9NG9O0Xc/Lgl9wOWWUUA/O8XZqTCo= +github.com/spf13/afero v1.0.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec h1:2ZXvIUGghLpdTVHR1UfvfrzoVlZaE/yOWC5LueIHZig= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.0 h1:oaPbdDe/x0UncahuwiPxW1GYJyilRAdsPnq3e1yaPcI= +github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I= +github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/uber-go/atomic v1.3.0 h1:ylWoWcs+jXihgo3Us1Sdsatf2R6+OlBGm8fexR3oFG4= +github.com/uber-go/atomic v1.3.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +go.uber.org/atomic v1.3.1 h1:U8WaWEmp56LGz7PReduqHRVF6zzs9GbMC2NEZ42dxSQ= +go.uber.org/atomic v1.3.1/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.7.1 h1:wKPciimwkIgV4Aag/wpSDzvtO5JrfwdHKHO7blTHx7Q= +go.uber.org/zap v1.7.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/bluesuncorp/validator.v9 v9.10.0 h1:eyhz/IzFglUqngYr1p7WCfKVAAQh9E/IsqJcnwG/OWg= +gopkg.in/bluesuncorp/validator.v9 v9.10.0/go.mod h1:sz1RrKEIYJCpC5S6ruDsBWo5vYV69E+bEZ86LbUsSZ8= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/yaml.v2 v2.0.0 h1:uUkhRGrsEyx/laRdeS6YIQKIys8pg+lRSRdVMTYjivs= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= diff --git a/gun/gun.go b/gun/gun.go deleted file mode 100644 index 85f048b09..000000000 --- a/gun/gun.go +++ /dev/null @@ -1,12 +0,0 @@ -package gun - -import ( - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "golang.org/x/net/context" -) - -type Gun interface { - Shoot(context.Context, ammo.Ammo) error - BindResultsTo(chan<- *aggregate.Sample) -} diff --git a/gun/http/http.go b/gun/http/http.go deleted file mode 100644 index 8b3b42143..000000000 --- a/gun/http/http.go +++ /dev/null @@ -1,172 +0,0 @@ -package http - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "time" - - "golang.org/x/net/context" - - "net" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/gun" -) - -// === Gun === - -const ( - // TODO: extract to config? - dialTimeout = 3 // in sec -) - -type HttpGun struct { - target string - ssl bool - client *http.Client - results chan<- *aggregate.Sample -} - -func (hg *HttpGun) BindResultsTo(results chan<- *aggregate.Sample) { - hg.results = results -} - -// Shoot to target, this method is not thread safe -func (hg *HttpGun) Shoot(ctx context.Context, a ammo.Ammo) error { - - if hg.client == nil { - hg.Connect() - } - start := time.Now() - ss := aggregate.AcquireSample(float64(start.UnixNano())/1e9, "REQUEST") - defer func() { - ss.RT = int(time.Since(start).Seconds() * 1e6) - hg.results <- ss - }() - // now send the request to obtain a http response - ha, ok := a.(*ammo.Http) - if !ok { - panic(fmt.Sprintf("Got '%T' instead of 'HttpAmmo'", a)) - } - if ha.Tag != "" { - ss.Tag += "|" + ha.Tag - } - var uri string - if hg.ssl { - uri = "https://" + ha.Host + ha.Uri - } else { - uri = "http://" + ha.Host + ha.Uri - } - req, err := http.NewRequest(ha.Method, uri, nil) - if err != nil { - log.Printf("Error making HTTP request: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - for k, v := range ha.Headers { - req.Header.Set(k, v) - } - req.URL.Host = hg.target - res, err := hg.client.Do(req) - if err != nil { - log.Printf("Error performing a request: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - defer res.Body.Close() - _, err = io.Copy(ioutil.Discard, res.Body) - if err != nil { - log.Printf("Error reading response body: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - - // TODO: make this an optional verbose answ_log output - //data := make([]byte, int(res.ContentLength)) - // _, err = res.Body.(io.Reader).Read(data) - // fmt.Println(string(data)) - ss.ProtoCode = res.StatusCode - return nil -} - -func (hg *HttpGun) Close() { - hg.client = nil -} - -func (hg *HttpGun) Connect() { - hg.Close() - config := tls.Config{ - InsecureSkipVerify: true, - } - // TODO: do we want to give access to keep alive settings for guns in config? 
- dialer := &net.Dialer{ - Timeout: dialTimeout * time.Second, - KeepAlive: 120 * time.Second, - } - tr := &http.Transport{ - TLSClientConfig: &config, - Dial: dialer.Dial, - TLSHandshakeTimeout: dialTimeout * time.Second, - } - hg.client = &http.Client{Transport: tr} - // connectStart := time.Now() - // config := tls.Config{ - // InsecureSkipVerify: true, - // NextProtos: []string{"HTTP/1.1"}, - // } - - // conn, err := tls.Dial("tcp", hg.target, &config) - // if err != nil { - // log.Printf("client: dial: %s\n", err) - // return - // } - // hg.client, err = Http.NewClientConn(conn) - // if err != nil { - // log.Printf("client: connect: %s\n", err) - // return - // } - // ss := aggregate.AcquireSample(float64(start.UnixNano())/1e9, "CONNECT") - // ss.rt = int(time.Since(connectStart).Seconds() * 1e6) - // ss.err = err - // if ss.err == nil { - // ss.StatusCode = 200 - // } - // results <- ss -} - -func New(c *config.Gun) (gun.Gun, error) { - params := c.Parameters - if params == nil { - return nil, errors.New("Parameters not specified") - } - target, ok := params["Target"] - if !ok { - return nil, errors.New("Target not specified") - } - g := &HttpGun{} - switch t := target.(type) { - case string: - g.target = target.(string) - default: - return nil, fmt.Errorf("Target is of the wrong type."+ - " Expected 'string' got '%T'", t) - } - if ssl, ok := params["SSL"]; ok { - if sslVal, casted := ssl.(bool); casted { - g.ssl = sslVal - } else { - return nil, fmt.Errorf("SSL should be boolean type.") - } - } - return g, nil -} diff --git a/gun/http/http_test.go b/gun/http/http_test.go deleted file mode 100644 index 8ebb5ceac..000000000 --- a/gun/http/http_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/utils" -) - -func TestHttpGunWithSsl(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - result := make(chan *aggregate.Sample) - requests := make(chan *http.Request) - - ts := httptest.NewTLSServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, client") - go func() { - requests <- r - }() - })) - defer ts.Close() - - gun := &HttpGun{ - target: ts.Listener.Addr().String(), - ssl: true, - results: result, - } - promise := utils.Promise(func() error { - defer close(result) - return gun.Shoot(ctx, &ammo.Http{ - Host: "example.org", - Method: "GET", - Uri: "/path", - Headers: map[string]string{ - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "Host": "example.org", - "User-Agent": "Pandora/0.0.1", - }, - }) - }) - results := aggregate.Drain(ctx, result) - require.Len(t, results, 1) - assert.Equal(t, "REQUEST", results[0].Tag) - assert.Equal(t, 200, results[0].ProtoCode) - - select { - case r := <-requests: - assert.Equal(t, "GET", r.Method) - assert.Equal(t, "example.org", r.Host) - assert.Equal(t, "/path", r.URL.Path) - assert.Equal(t, "Pandora/0.0.1", r.Header.Get("User-Agent")) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - // TODO: test scenaries with errors - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestHttpGunWithHttp(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - 
defer cancel() - - result := make(chan *aggregate.Sample) - requests := make(chan *http.Request) - - ts := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, client") - go func() { - requests <- r - }() - })) - defer ts.Close() - - gun := &HttpGun{ - target: ts.Listener.Addr().String(), - ssl: false, - results: result, - } - promise := utils.Promise(func() error { - defer close(result) - return gun.Shoot(ctx, &ammo.Http{ - Host: "example.org", - Method: "GET", - Uri: "/path", - Headers: map[string]string{ - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "Host": "example.org", - "User-Agent": "Pandora/0.0.1", - }, - }) - }) - results := aggregate.Drain(ctx, result) - require.Len(t, results, 1) - assert.Equal(t, "REQUEST", results[0].Tag) - assert.Equal(t, 200, results[0].ProtoCode) - - select { - case r := <-requests: - assert.Equal(t, "GET", r.Method) - assert.Equal(t, "example.org", r.Host) - assert.Equal(t, "/path", r.URL.Path) - assert.Equal(t, "Pandora/0.0.1", r.Header.Get("User-Agent")) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - // TODO: test scenaries with errors - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} diff --git a/gun/http/testdata/ammo.jsonline b/gun/http/testdata/ammo.jsonline deleted file mode 100644 index 2f459d2fd..000000000 --- a/gun/http/testdata/ammo.jsonline +++ /dev/null @@ -1,25 +0,0 @@ -{"uri": "/00", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/01", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"tag": "hello", "uri": "/02", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/03", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/04", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/05", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/06", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/07", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"tag": "hello", "uri": "/08", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/09", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/10", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/11", "method": "GET", "headers": 
{"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/12", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/13", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/14", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/15", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/16", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/17", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/18", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/19", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/20", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/21", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/22", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"uri": "/23", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} -{"tag": "hello", "uri": "/24", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "localhost:3000", "User-Agent": "Pandora/0.0.1"}, "host": "localhost:3000"} diff --git a/gun/log.go b/gun/log.go deleted file mode 100644 index b05444366..000000000 --- a/gun/log.go +++ /dev/null @@ -1,30 +0,0 @@ -package gun - -import ( - "log" - "time" - - "golang.org/x/net/context" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" -) - -type LogGun struct { - results chan<- *aggregate.Sample -} - -func (l *LogGun) BindResultsTo(results chan<- *aggregate.Sample) { - l.results = results -} - -func (l *LogGun) Shoot(ctx context.Context, a ammo.Ammo) error { - log.Println("Log message: ", a.(*ammo.Log).Message) - l.results <- aggregate.AcquireSample(float64(time.Now().UnixNano())/1e9, "REQUEST") - return nil -} - -func NewLogGunFromConfig(c *config.Gun) (g Gun, err error) { - return &LogGun{}, nil -} diff --git a/gun/spdy/spdy.go b/gun/spdy/spdy.go deleted file mode 100644 index d78cb31c1..000000000 --- a/gun/spdy/spdy.go +++ /dev/null @@ -1,201 +0,0 @@ -package spdy - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - 
"io/ioutil" - "log" - "net/http" - "time" - - "github.com/amahi/spdy" - - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/gun" - "golang.org/x/net/context" -) - -type SpdyGun struct { - target string - client *spdy.Client - results chan<- *aggregate.Sample - closed bool -} - -func (sg *SpdyGun) BindResultsTo(results chan<- *aggregate.Sample) { - sg.results = results -} - -func (sg *SpdyGun) Shoot(ctx context.Context, a ammo.Ammo) error { - if sg.client == nil { - if err := sg.Connect(); err != nil { - return err - } - } - - start := time.Now() - ss := aggregate.AcquireSample(float64(start.UnixNano())/1e9, "REQUEST") - defer func() { - ss.RT = int(time.Since(start).Seconds() * 1e6) - sg.results <- ss - }() - // now send the request to obtain a http response - ha, ok := a.(*ammo.Http) - if !ok { - panic(fmt.Sprintf("Got '%T' instead of 'HttpAmmo'", a)) - } - if ha.Tag != "" { - ss.Tag += "|" + ha.Tag - } - - req, err := http.NewRequest(ha.Method, "https://"+ha.Host+ha.Uri, nil) - if err != nil { - log.Printf("Error making HTTP request: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - res, err := sg.client.Do(req) - if err != nil { - log.Printf("Error performing a request: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - - defer res.Body.Close() - _, err = io.Copy(ioutil.Discard, res.Body) - if err != nil { - log.Printf("Error reading response body: %s\n", err) - ss.Err = err - ss.NetCode = 999 - return err - } - // TODO: make this an optional verbose answ_log output - //data := make([]byte, int(res.ContentLength)) - // _, err = res.Body.(io.Reader).Read(data) - // fmt.Println(string(data)) - ss.ProtoCode = res.StatusCode - return err -} - -func (sg *SpdyGun) Close() { - sg.closed = true - if sg.client != nil { - sg.client.Close() - } -} - -func (sg *SpdyGun) Connect() error { - // FIXME: rewrite connection logic, it isn't thread safe right now. 
- start := time.Now() - ss := aggregate.AcquireSample(float64(start.UnixNano())/1e9, "CONNECT") - defer func() { - ss.RT = int(time.Since(start).Seconds() * 1e6) - sg.results <- ss - }() - config := tls.Config{ - InsecureSkipVerify: true, - NextProtos: []string{"spdy/3.1"}, - } - conn, err := tls.Dial("tcp", sg.target, &config) - if err != nil { - ss.Err = err - ss.NetCode = 999 - return err - } - client, err := spdy.NewClientConn(conn) - if err != nil { - ss.Err = err - ss.NetCode = 999 - return err - } else { - ss.ProtoCode = 200 - } - if sg.client != nil { - sg.Close() - } - sg.client = client - - return nil -} - -func (sg *SpdyGun) Ping() { - if sg.client == nil { - return - } - pingStart := time.Now() - - pinged, err := sg.client.Ping(time.Second * 15) - if err != nil { - log.Printf("client: ping: %s\n", err) - } - if !pinged { - log.Printf("client: ping: timed out\n") - } - ss := aggregate.AcquireSample(float64(pingStart.UnixNano())/1e9, "PING") - ss.RT = int(time.Since(pingStart).Seconds() * 1e6) - - if err == nil && pinged { - ss.ProtoCode = 200 - } else { - ss.Err = err - ss.ProtoCode = 500 - } - sg.results <- ss - if err != nil { - sg.Connect() - } -} - -func (sg *SpdyGun) startAutoPing(pingPeriod time.Duration) { - if pingPeriod > 0 { - go func() { - for range time.NewTicker(pingPeriod).C { - if sg.closed { - return - } - sg.Ping() - } - }() - } -} - -func New(c *config.Gun) (gun.Gun, error) { - params := c.Parameters - if params == nil { - return nil, errors.New("Parameters not specified") - } - target, ok := params["Target"] - if !ok { - return nil, errors.New("Target not specified") - } - var pingPeriod time.Duration - paramPingPeriod, ok := params["PingPeriod"] - if !ok { - paramPingPeriod = 120.0 // TODO: move this default elsewhere - } - switch t := paramPingPeriod.(type) { - case float64: - pingPeriod = time.Duration(paramPingPeriod.(float64)*1e3) * time.Millisecond - default: - return nil, fmt.Errorf("Period is of the wrong type."+ - " Expected 'float64' got '%T'", t) - } - var g gun.Gun - switch t := target.(type) { - case string: - g = &SpdyGun{ - target: target.(string), - } - default: - return nil, fmt.Errorf("Target is of the wrong type."+ - " Expected 'string' got '%T'", t) - } - g.(*SpdyGun).startAutoPing(pingPeriod) - return g, nil -} diff --git a/gun/spdy/spdy_test.go b/gun/spdy/spdy_test.go deleted file mode 100644 index c33482aa8..000000000 --- a/gun/spdy/spdy_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package spdy - -import ( - "flag" - "log" - "net/http" - "os" - "testing" - "time" - - "github.com/SlyMarbo/spdy" // we specially use spdy server from another library - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/yandex/pandora/aggregate" - "github.com/yandex/pandora/ammo" - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" - "golang.org/x/net/context" -) - -func TestSpdyGun(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - result := make(chan *aggregate.Sample) - - gun := &SpdyGun{ - target: "localhost:3000", - results: result, - } - promise := utils.Promise(func() error { - defer gun.Close() - defer close(result) - return gun.Shoot(ctx, &ammo.Http{ - Host: "example.org", - Method: "GET", - Uri: "/path", - Headers: map[string]string{ - "Accept": "*/*", - "Accept-Encoding": "gzip, deflate", - "Host": "example.org", - "User-Agent": "Pandora/0.0.1", - }, - }) - }) - - results := aggregate.Drain(ctx, result) - require.Len(t, results, 2) - { 
- // first result is connect - - assert.Equal(t, "CONNECT", results[0].Tag) - assert.Equal(t, 200, results[0].ProtoCode) - } - { - // second result is request - - assert.Equal(t, "REQUEST", results[1].Tag) - assert.Equal(t, 200, results[1].ProtoCode) - } - - // TODO: test scenaries with errors - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - -} - -func TestSpdyConnectPing(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - result := make(chan *aggregate.Sample) - - gun := &SpdyGun{ - target: "localhost:3000", - results: result, - } - promise := utils.Promise(func() error { - defer gun.Close() - defer close(result) - if err := gun.Connect(); err != nil { - return err - } - gun.Ping() - return nil - }) - - results := aggregate.Drain(ctx, result) - require.Len(t, results, 2) - { - // first result is connect - - assert.Equal(t, "CONNECT", results[0].Tag) - assert.Equal(t, 200, results[0].ProtoCode) - } - { - // second result is PING - - assert.Equal(t, "PING", results[1].Tag) - assert.Equal(t, 200, results[1].ProtoCode) - } - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - -} - -func TestNewSpdyGun(t *testing.T) { - spdyConfig := &config.Gun{ - GunType: "spdy", - Parameters: map[string]interface{}{ - "Target": "localhost:3000", - "PingPeriod": 5.0, - }, - } - g, err := New(spdyConfig) - assert.NoError(t, err) - _, ok := g.(*SpdyGun) - assert.Equal(t, true, ok) - - failSpdyConfig := &config.Gun{ - GunType: "spdy", - Parameters: map[string]interface{}{ - "Target": "localhost:3000", - "PingPeriod": "not-a-number", - }, - } - _, err = New(failSpdyConfig) - assert.Error(t, err) -} - -func runSpdyTestServer() { - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - w.Write([]byte("This is an example server.\n")) - }) - - //use spdy's Listen and serve - log.Println("Run spdy server on localhost:3000") - err := spdy.ListenAndServeTLS("localhost:3000", - "./testdata/test.crt", "./testdata/test.key", nil) - if err != nil { - //error handling here - log.Panic(err) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - go runSpdyTestServer() - time.Sleep(time.Millisecond * 5) // wait for server - os.Exit(m.Run()) -} diff --git a/gun/spdy/testdata/ammo.jsonline b/gun/spdy/testdata/ammo.jsonline deleted file mode 100644 index 47c3d85af..000000000 --- a/gun/spdy/testdata/ammo.jsonline +++ /dev/null @@ -1,25 +0,0 @@ -{"uri": "/00", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/01", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/02", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/03", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/04", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/05", "method": "GET", "headers": {"Accept": "*/*", 
"Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/06", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/07", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/08", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/09", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/10", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/11", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/12", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/13", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/14", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/15", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/16", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/17", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/18", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/19", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/20", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/21", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/22", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"uri": "/23", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} -{"tag": "hello", "uri": "/24", "method": "GET", "headers": {"Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Host": "example.org", "User-Agent": "Pandora/0.0.1"}, "host": "example.org"} diff --git a/gun/spdy/testdata/test.crt 
b/gun/spdy/testdata/test.crt deleted file mode 100644 index f3903e603..000000000 --- a/gun/spdy/testdata/test.crt +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICATCCAWoCCQCqSTiZw8HwNjANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTE1MTExNTExMjcxMloXDTE2MTExNDExMjcxMlowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzV40 -2rkGMG/+DtLXljfm+2fub4B1+3uREKZzUI7gpPQMVRWbaZDVWpfG7MJ0IcLa2IFw -sI8IK6NotjX2paAfABUpv9MvFYCIb6w6F+VLhMOCgARPIp46foTBnS1HHwtwvdbj -MdGQyslf2zIw5xYI5W5f7gLeZQrEXGf5xId82n8CAwEAATANBgkqhkiG9w0BAQUF -AAOBgQBarNn2B8PyEltCInlqo91iZ190U0OxDHdF0b65SorsroxAvviIqUGzfy4E -LoMDCdrEwGRaXy0SJPloBch1EpzKWQDN++RtxLoAaYkyVaqnxJpMrq3IptDcTINv -1jp/D4S4PQ8HXd6iLSw6s85J3WAFnq4j1Hb0vacSKiiaa3iMhw== ------END CERTIFICATE----- diff --git a/gun/spdy/testdata/test.key b/gun/spdy/testdata/test.key deleted file mode 100644 index a4f361054..000000000 --- a/gun/spdy/testdata/test.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXgIBAAKBgQDNXjTauQYwb/4O0teWN+b7Z+5vgHX7e5EQpnNQjuCk9AxVFZtp -kNVal8bswnQhwtrYgXCwjwgro2i2NfaloB8AFSm/0y8VgIhvrDoX5UuEw4KABE8i -njp+hMGdLUcfC3C91uMx0ZDKyV/bMjDnFgjlbl/uAt5lCsRcZ/nEh3zafwIDAQAB -AoGBAIvfSehedFSxMjwuZh8t2+gHFWJj//dUZa0NcSTA60ft4ChjGwJ/XpDhJt9C -YY5NBHjJUFMwV8RA+6R+hIqxz+opD0tjnVje77s8mY2x3K79tRjN3H3hqLgdZ4DW -lDUx92xUnq4ZOPHBH6UFWjfq9U3boUi6Ojh72O6QLnjkHq1hAkEA+WUZJfqAE5g+ -Uy3oVjPJAkQGvrtJhjoL2ioTneDT7BJDGsbFVHr7VOFwzo9csp75bV+RHQ3ktolr -bJ3AgVDR9QJBANLOmt4/0nKCbhIzbMTsALPBEClaPozNk8f7UYDHs+e12VgAa4aQ -vBqUW6Hf6QODSkN9XfSvs+9H+yNugAdpziMCQDa11/t95aTmzB1hP8vRVnJZzAUB -fxKkHz9/5YfH75WlN/Viz51F0WcAgce+qX5B48P7qBDwoFDu+pL8VGPKABUCQQDJ -y0qGe5OpV9+j/qQdMUlOyP3+h8uen1/arGINo508rl/6lJRZgQz/aIr6vstgOHqL -dmPNSbA12rK17R4Tep3/AkEAkf/CF3tWetGuQrWpXZstiPNKSQAFwO0p0v7WMlXL -zxC30NM4A5DsNELbOReMoWuJz7XAdNH42arwcyrElO0iiw== ------END RSA PRIVATE KEY----- diff --git a/labels.json b/labels.json new file mode 100644 index 000000000..78dbce7d9 --- /dev/null +++ b/labels.json @@ -0,0 +1,82 @@ +[ + { + "name": "Priority: Critical", + "color": "#e11d21" + }, + { + "name": "Priority: High", + "color": "#eb6420" + }, + { + "name": "Priority: Low", + "color": "#bfd4f2" + }, + { + "name": "Priority: Medium", + "color": "#009800" + }, + { + "name": "Priority: Minor", + "color": "#c5def5" + }, + { + "name": "Resolution: Duplicate", + "color": "#cccccc" + }, + { + "name": "Resolution: Invalid", + "color": "#e6e6e6" + }, + { + "name": "Resolution: Resolved", + "color": "#fef2c0" + }, + { + "name": "Resolution: Wontfix", + "color": "#ffffff" + }, + { + "name": "Status: Available", + "color": "#bfe5bf" + }, + { + "name": "Status: Blocked", + "color": "#e11d21" + }, + { + "name": "Status: In Progress", + "color": "#cccccc" + }, + { + "name": "Status: Resolved", + "color": "#fef2c0" + }, + { + "name": "Status: Review", + "color": "#fbca04" + }, + { + "name": "Type: Bug", + "color": "#e11d21" + }, + { + "name": "Type: Enhancement", + "color": "#bfd4f2" + }, + { + "name": "Type: Epic", + "color": "#f9d0c4" + }, + { + "name": "Type: Feature", + "color": "#c2e0c6" + }, + { + "name": "Type: Maintenance", + "color": "#fef2c0" + }, + { + "name": "Type: Question", + "color": "#d4c5f9" + } +] \ No newline at end of file diff --git a/lib/errutil/errutil.go b/lib/errutil/errutil.go new file mode 100644 index 000000000..bffd469b7 --- /dev/null +++ b/lib/errutil/errutil.go @@ -0,0 +1,43 @@ +// Copyright 
(c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package errutil + +import ( + "context" + + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" +) + +type StackTracer interface { + StackTrace() errors.StackTrace +} + +// FIXME(skipor): test +func Join(err1, err2 error) error { + switch { + case err1 == nil: + return err2 + case err2 == nil: + return err1 + default: + return multierror.Append(err1, err2) + } +} + +func IsNotCtxError(ctx context.Context, err error) bool { + if err == nil { + return false + } + select { + case <-ctx.Done(): + if ctx.Err() == errors.Cause(err) { // Support github.com/pkg/errors wrapping + return false + } + default: + } + return true +} diff --git a/lib/errutil/errutil_suite_test.go b/lib/errutil/errutil_suite_test.go new file mode 100644 index 000000000..7a82d5197 --- /dev/null +++ b/lib/errutil/errutil_suite_test.go @@ -0,0 +1,38 @@ +package errutil + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestErrutil(t *testing.T) { + ginkgoutil.RunSuite(t, "Errutil Suite") +} + +var _ = Describe("Iscoreutil.IsNotCtxErroror", func() { + canceledContext, cancel := context.WithCancel(context.Background()) + cancel() + + It("nil error", func() { + Expect(IsNotCtxError(context.Background(), nil)).To(BeFalse()) + }) + + It("context error", func() { + Expect(IsNotCtxError(canceledContext, context.Canceled)).To(BeFalse()) + }) + + It("caused by context error", func() { + Expect(IsNotCtxError(canceledContext, errors.Wrap(context.Canceled, "new err"))).To(BeFalse()) + }) + + It("usual error", func() { + err := errors.New("new err") + Expect(IsNotCtxError(canceledContext, err)).To(BeTrue()) + Expect(IsNotCtxError(context.Background(), err)).To(BeTrue()) + }) +}) diff --git a/lib/ginkgoutil/ginkgo.go b/lib/ginkgoutil/ginkgo.go new file mode 100644 index 000000000..51c2c731e --- /dev/null +++ b/lib/ginkgoutil/ginkgo.go @@ -0,0 +1,68 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package ginkgoutil + +import ( + "strings" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "github.com/spf13/viper" + "github.com/stretchr/testify/mock" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func SetupSuite() { + format.UseStringerRepresentation = true // Otherwise error stacks have binary format. 
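
The errutil helpers added above (Join and IsNotCtxError) are easy to misread, so here is a minimal usage sketch. It is not part of the patch; shoot and runAll are hypothetical names invented for illustration, and the sketch only assumes the signatures shown in lib/errutil/errutil.go.

    package example

    import (
    	"context"

    	"github.com/pkg/errors"
    	"github.com/yandex/pandora/lib/errutil"
    )

    // shoot is a made-up stand-in for real per-request work.
    func shoot(ctx context.Context, i int) error {
    	if i == 3 {
    		return errors.New("request 3 failed")
    	}
    	return nil
    }

    // runAll stops quietly on context cancellation and accumulates real errors.
    func runAll(ctx context.Context) error {
    	var total error
    	for i := 0; i < 10; i++ {
    		err := shoot(ctx, i)
    		if err == nil {
    			continue
    		}
    		if !errutil.IsNotCtxError(ctx, err) {
    			break // cancellation caused by ctx is not a failure
    		}
    		total = errutil.Join(total, err) // nil-safe accumulation
    	}
    	return total
    }
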
+ ReplaceGlobalLogger() + RegisterFailHandler(Fail) +} + +func RunSuite(t *testing.T, description string) { + SetupSuite() + RunSpecs(t, description) +} + +func ReplaceGlobalLogger() *zap.Logger { + log := NewLogger() + zap.ReplaceGlobals(log) + zap.RedirectStdLog(log) + return log +} + +func NewLogger() *zap.Logger { + conf := zap.NewDevelopmentConfig() + enc := zapcore.NewConsoleEncoder(conf.EncoderConfig) + core := zapcore.NewCore(enc, zapcore.AddSync(GinkgoWriter), zap.DebugLevel) + log := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zap.DPanicLevel)) + return log +} + +type Mock interface { + AssertExpectations(t mock.TestingT) bool + AssertNotCalled(t mock.TestingT, methodName string, arguments ...interface{}) bool +} + +func AssertExpectations(mocks ...Mock) { + for _, m := range mocks { + m.AssertExpectations(GinkgoT(1)) + } +} + +func AssertNotCalled(mock Mock, methodName string) { + mock.AssertNotCalled(GinkgoT(1), methodName) +} + +func ParseYAML(data string) map[string]interface{} { + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(strings.NewReader(data)) + Expect(err).NotTo(HaveOccurred()) + return v.AllSettings() +} diff --git a/lib/ginkgoutil/matchers.go b/lib/ginkgoutil/matchers.go new file mode 100644 index 000000000..5ecf59181 --- /dev/null +++ b/lib/ginkgoutil/matchers.go @@ -0,0 +1,18 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package ginkgoutil + +import ( + "reflect" + + . "github.com/onsi/gomega" +) + +func ExpectFuncsEqual(f1, f2 interface{}) { + val1 := reflect.ValueOf(f1) + val2 := reflect.ValueOf(f2) + Expect(val1.Pointer()).To(Equal(val2.Pointer())) +} diff --git a/lib/ioutil2/closer.go b/lib/ioutil2/closer.go new file mode 100644 index 000000000..3fcfea9be --- /dev/null +++ b/lib/ioutil2/closer.go @@ -0,0 +1,11 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package ioutil2 + +// NopCloser may be embedded to any struct to implement io.Closer doing nothing on closer. +type NopCloser struct{} + +func (NopCloser) Close() error { return nil } diff --git a/lib/ioutil2/funcs.go b/lib/ioutil2/funcs.go new file mode 100644 index 000000000..4b654bb33 --- /dev/null +++ b/lib/ioutil2/funcs.go @@ -0,0 +1,16 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package ioutil2 + +type ReaderFunc func(p []byte) (n int, err error) +type WriterFunc func(p []byte) (n int, err error) +type CloserFunc func() error + +func (f ReaderFunc) Read(p []byte) (int, error) { return f(p) } +func (f WriterFunc) Write(p []byte) (int, error) { return f(p) } +func (f CloserFunc) Close() error { return f() } + +type StringerFunc func() string + +func (f StringerFunc) String() string { return f() } diff --git a/lib/ioutil2/io_mocks_test.go b/lib/ioutil2/io_mocks_test.go new file mode 100644 index 000000000..de309a6fc --- /dev/null +++ b/lib/ioutil2/io_mocks_test.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Yandex LLC. All rights reserved. +// Author: Vladimir Skipor + +package ioutil2 + +import "io" + +// File for generation mocks from io package. +// NOTE: remove _test filename suffix before run mockery. 
+ +//go:generate mockery -name=Closer -case=underscore -outpkg=iomock + +type Closer interface { + io.Closer +} + +//go:generate mockery -name=Writer -case=underscore -outpkg=iomock + +type Writer interface { + io.Writer +} + +//go:generate mockery -name=Reader -case=underscore -outpkg=iomock + +type Reader interface { + io.Writer +} + +//go:generate mockery -name=ReadCloser -case=underscore -outpkg=iomock + +type ReadCloser interface { + io.ReadCloser +} + +//go:generate mockery -name=WriteCloser -case=underscore -outpkg=iomock + +type WriteCloser interface { + io.WriteCloser +} diff --git a/lib/ioutil2/mocks/closer.go b/lib/ioutil2/mocks/closer.go new file mode 100644 index 000000000..edce28032 --- /dev/null +++ b/lib/ioutil2/mocks/closer.go @@ -0,0 +1,23 @@ +// Code generated by mockery v1.0.0 +package iomock + +import mock "github.com/stretchr/testify/mock" + +// Closer is an autogenerated mock type for the Closer type +type Closer struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Closer) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/lib/ioutil2/mocks/read_closer.go b/lib/ioutil2/mocks/read_closer.go new file mode 100644 index 000000000..ac4af78ac --- /dev/null +++ b/lib/ioutil2/mocks/read_closer.go @@ -0,0 +1,44 @@ +// Code generated by mockery v1.0.0 +package iomock + +import mock "github.com/stretchr/testify/mock" + +// ReadCloser is an autogenerated mock type for the ReadCloser type +type ReadCloser struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *ReadCloser) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Read provides a mock function with given fields: p +func (_m *ReadCloser) Read(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/ioutil2/mocks/reader.go b/lib/ioutil2/mocks/reader.go new file mode 100644 index 000000000..dd98a592b --- /dev/null +++ b/lib/ioutil2/mocks/reader.go @@ -0,0 +1,30 @@ +// Code generated by mockery v1.0.0 +package iomock + +import mock "github.com/stretchr/testify/mock" + +// Reader is an autogenerated mock type for the Reader type +type Reader struct { + mock.Mock +} + +// Write provides a mock function with given fields: p +func (_m *Reader) Write(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/ioutil2/mocks/write_closer.go b/lib/ioutil2/mocks/write_closer.go new file mode 100644 index 000000000..0752b976a --- /dev/null +++ b/lib/ioutil2/mocks/write_closer.go @@ -0,0 +1,44 @@ +// Code generated by mockery v1.0.0 +package iomock + +import mock "github.com/stretchr/testify/mock" + +// WriteCloser is an autogenerated mock type for the WriteCloser type +type WriteCloser struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *WriteCloser) Close() error { + 
ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: p +func (_m *WriteCloser) Write(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/ioutil2/mocks/writer.go b/lib/ioutil2/mocks/writer.go new file mode 100644 index 000000000..e6d2fa8d0 --- /dev/null +++ b/lib/ioutil2/mocks/writer.go @@ -0,0 +1,30 @@ +// Code generated by mockery v1.0.0 +package iomock + +import mock "github.com/stretchr/testify/mock" + +// Writer is an autogenerated mock type for the Writer type +type Writer struct { + mock.Mock +} + +// Write provides a mock function with given fields: p +func (_m *Writer) Write(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/ioutil2/reader.go b/lib/ioutil2/reader.go new file mode 100644 index 000000000..261e93de7 --- /dev/null +++ b/lib/ioutil2/reader.go @@ -0,0 +1,50 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package ioutil2 + +import "io" + +type ReaderWrapper interface { + Unwrap() io.Reader +} + +// TODO(skipor): test + +func NewMultiPassReader(r io.Reader, passes int) io.Reader { + if passes == 1 { + return r + } + rs, isSeakable := r.(io.ReadSeeker) + if !isSeakable { + return r + } + return &MultiPassReader{rs: rs, passesLimit: passes} +} + +type MultiPassReader struct { + rs io.ReadSeeker + passesCount int + passesLimit int +} + +func (r *MultiPassReader) Read(p []byte) (n int, err error) { + n, err = r.rs.Read(p) + if err == io.EOF { + r.passesCount++ + if r.passesLimit <= 0 || r.passesCount < r.passesLimit { + _, err = r.rs.Seek(0, io.SeekStart) + } + } + return +} + +func (r *MultiPassReader) PassesLeft() int { + return r.PassesLeft() +} + +func (r *MultiPassReader) Unwrap() io.Reader { + return r.rs +} diff --git a/lib/ioutil2/writer.go b/lib/ioutil2/writer.go new file mode 100644 index 000000000..477dbc85c --- /dev/null +++ b/lib/ioutil2/writer.go @@ -0,0 +1,35 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package ioutil2 + +import ( + "bufio" + "bytes" + "io" +) + +type StringWriter interface { + WriteString(s string) (n int, err error) +} + +// ByteWriter represents efficient io.Writer, that don't need buffering. +// Implemented by *bufio.Writer and *bytes.Buffer. 
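
A short sketch of how the MultiPassReader introduced in lib/ioutil2/reader.go behaves; it is illustrative only and not part of the patch. The only assumption is the constructor signature shown above; *strings.Reader is used because it implements io.ReadSeeker.

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"github.com/yandex/pandora/lib/ioutil2"
    )

    func main() {
    	// On EOF the wrapper seeks back to the start until the pass limit
    	// (3 here) is reached, so ReadAll sees the payload three times.
    	r := ioutil2.NewMultiPassReader(strings.NewReader("GET /ping\n"), 3)
    	data, err := ioutil.ReadAll(r)
    	fmt.Printf("%q %v\n", data, err) // "GET /ping\nGET /ping\nGET /ping\n" <nil>
    }
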
+type ByteWriter interface { + io.Writer + StringWriter + io.ByteWriter + io.ReaderFrom +} + +var _ ByteWriter = &bufio.Writer{} +var _ ByteWriter = &bytes.Buffer{} + +func NewCallbackWriter(w io.Writer, onWrite func()) WriterFunc { + return func(p []byte) (n int, err error) { + onWrite() + return w.Write(p) + } +} diff --git a/lib/monitoring/counter.go b/lib/monitoring/counter.go new file mode 100644 index 000000000..405e52cb4 --- /dev/null +++ b/lib/monitoring/counter.go @@ -0,0 +1,43 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package monitoring + +import ( + "expvar" + "strconv" + + "github.com/uber-go/atomic" +) + +// TODO: use one rcrowley/go-metrics instead. + +type Counter struct { + i atomic.Int64 +} + +var _ expvar.Var = (*Counter)(nil) + +func (c *Counter) String() string { + return strconv.FormatInt(c.i.Load(), 10) +} + +func (c *Counter) Add(delta int64) { + c.i.Add(delta) +} + +func (c *Counter) Set(value int64) { + c.i.Store(value) +} + +func (c *Counter) Get() int64 { + return c.i.Load() +} + +func NewCounter(name string) *Counter { + v := &Counter{} + expvar.Publish(name, v) + return v +} diff --git a/lib/netutil/dial.go b/lib/netutil/dial.go new file mode 100644 index 000000000..71709873f --- /dev/null +++ b/lib/netutil/dial.go @@ -0,0 +1,115 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package netutil + +import ( + "context" + "net" + "sync" + + "github.com/pkg/errors" +) + +//go:generate mockery -name=Dialer -case=underscore -outpkg=netmock + +type Dialer interface { + DialContext(ctx context.Context, net, addr string) (net.Conn, error) +} + +var _ Dialer = &net.Dialer{} + +type DialerFunc func(ctx context.Context, network, address string) (net.Conn, error) + +func (f DialerFunc) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return f(ctx, network, address) +} + +// NewDNSCachingDialer returns dialer with primitive DNS caching logic +// that remembers remote address on first try, and use it in future. +func NewDNSCachingDialer(dialer Dialer, cache DNSCache) DialerFunc { + return func(ctx context.Context, network, addr string) (conn net.Conn, err error) { + resolved, ok := cache.Get(addr) + if ok { + return dialer.DialContext(ctx, network, resolved) + } + conn, err = dialer.DialContext(ctx, network, addr) + if err != nil { + return + } + remoteAddr := conn.RemoteAddr().(*net.TCPAddr) + _, port, err := net.SplitHostPort(addr) + if err != nil { + conn.Close() + return nil, errors.Wrap(err, "invalid address, but successful dial - should not happen") + } + cache.Add(addr, net.JoinHostPort(remoteAddr.IP.String(), port)) + return + } +} + +var DefaultDNSCache = &SimpleDNSCache{} + +// LookupReachable tries to resolve addr via connecting to it. +// This method has much more overhead, but get guaranteed reachable resolved addr. +// Example: host is resolved to IPv4 and IPv6, but IPv4 is not working on machine. +// LookupReachable will return IPv6 in that case. 
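
For context, a hedged sketch of the expvar-backed Counter from lib/monitoring/counter.go; it is not part of the patch, and the metric name "engine_RequestsDone" is an invented example.

    package main

    import (
    	"fmt"

    	"github.com/yandex/pandora/lib/monitoring"
    )

    func main() {
    	// NewCounter publishes the value via expvar, so it also appears on
    	// /debug/vars if an expvar HTTP handler is served.
    	requests := monitoring.NewCounter("engine_RequestsDone")
    	requests.Add(2)
    	requests.Add(3)
    	fmt.Println(requests.Get(), requests.String()) // prints: 5 5
    }
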
+func LookupReachable(addr string) (string, error) { + d := net.Dialer{DualStack: true} + conn, err := d.Dial("tcp", addr) + if err != nil { + return "", err + } + defer conn.Close() + _, port, err := net.SplitHostPort(addr) + if err != nil { + return "", err + } + remoteAddr := conn.RemoteAddr().(*net.TCPAddr) + return net.JoinHostPort(remoteAddr.IP.String(), port), nil +} + +// WarmDNSCache tries connect to addr, and adds conn remote ip + addr port to cache. +func WarmDNSCache(c DNSCache, addr string) error { + var d net.Dialer + conn, err := NewDNSCachingDialer(&d, c).DialContext(context.Background(), "tcp", addr) + if err != nil { + return err + } + conn.Close() + return nil +} + +//go:generate mockery -name=DNSCache -case=underscore -outpkg=netmock + +type DNSCache interface { + Get(addr string) (string, bool) + Add(addr, resolved string) +} + +type SimpleDNSCache struct { + rw sync.RWMutex + hostToAddr map[string]string +} + +func (c *SimpleDNSCache) Get(addr string) (resolved string, ok bool) { + c.rw.RLock() + if c.hostToAddr == nil { + c.rw.RUnlock() + return + } + resolved, ok = c.hostToAddr[addr] + c.rw.RUnlock() + return +} + +func (c *SimpleDNSCache) Add(addr, resolved string) { + c.rw.Lock() + if c.hostToAddr == nil { + c.hostToAddr = make(map[string]string) + } + c.hostToAddr[addr] = resolved + c.rw.Unlock() +} diff --git a/lib/netutil/mocks/conn.go b/lib/netutil/mocks/conn.go new file mode 100644 index 000000000..7f10c79c0 --- /dev/null +++ b/lib/netutil/mocks/conn.go @@ -0,0 +1,142 @@ +// Code generated by mockery v1.0.0 +package netmock + +import "github.com/stretchr/testify/mock" +import "net" + +import "time" + +// Conn is an autogenerated mock type for the Conn type +type Conn struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Conn) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// LocalAddr provides a mock function with given fields: +func (_m *Conn) LocalAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// Read provides a mock function with given fields: b +func (_m *Conn) Read(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Conn) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// SetDeadline provides a mock function with given fields: t +func (_m *Conn) SetDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetReadDeadline provides a mock function with given fields: t +func (_m *Conn) SetReadDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWriteDeadline provides a mock 
function with given fields: t +func (_m *Conn) SetWriteDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: b +func (_m *Conn) Write(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/netutil/mocks/dialer.go b/lib/netutil/mocks/dialer.go new file mode 100644 index 000000000..27e1ea755 --- /dev/null +++ b/lib/netutil/mocks/dialer.go @@ -0,0 +1,34 @@ +// Code generated by mockery v1.0.0 +package netmock + +import "context" +import "github.com/stretchr/testify/mock" +import "net" + +// Dialer is an autogenerated mock type for the Dialer type +type Dialer struct { + mock.Mock +} + +// DialContext provides a mock function with given fields: ctx, _a1, addr +func (_m *Dialer) DialContext(ctx context.Context, _a1 string, addr string) (net.Conn, error) { + ret := _m.Called(ctx, _a1, addr) + + var r0 net.Conn + if rf, ok := ret.Get(0).(func(context.Context, string, string) net.Conn); ok { + r0 = rf(ctx, _a1, addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Conn) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, _a1, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lib/netutil/mocks/dns_cache.go b/lib/netutil/mocks/dns_cache.go new file mode 100644 index 000000000..ca9979966 --- /dev/null +++ b/lib/netutil/mocks/dns_cache.go @@ -0,0 +1,35 @@ +// Code generated by mockery v1.0.0 +package netmock + +import "github.com/stretchr/testify/mock" + +// DNSCache is an autogenerated mock type for the DNSCache type +type DNSCache struct { + mock.Mock +} + +// Add provides a mock function with given fields: addr, resolved +func (_m *DNSCache) Add(addr string, resolved string) { + _m.Called(addr, resolved) +} + +// Get provides a mock function with given fields: addr +func (_m *DNSCache) Get(addr string) (string, bool) { + ret := _m.Called(addr) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(addr) + } else { + r0 = ret.Get(0).(string) + } + + var r1 bool + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(addr) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} diff --git a/lib/netutil/netutil_suite_test.go b/lib/netutil/netutil_suite_test.go new file mode 100644 index 000000000..baee6a76a --- /dev/null +++ b/lib/netutil/netutil_suite_test.go @@ -0,0 +1,107 @@ +package netutil + +import ( + "context" + "net" + "strconv" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/pkg/errors" + "github.com/yandex/pandora/lib/ginkgoutil" + netmock "github.com/yandex/pandora/lib/netutil/mocks" +) + +func TestNetutil(t *testing.T) { + ginkgoutil.RunSuite(t, "Netutil Suite") +} + +var _ = Describe("DNS", func() { + + It("lookup reachable", func() { + listener, err := net.ListenTCP("tcp4", nil) + defer listener.Close() + Expect(err).NotTo(HaveOccurred()) + + port := strconv.Itoa(listener.Addr().(*net.TCPAddr).Port) + addr := "localhost:" + port + expectedResolved := "127.0.0.1:" + port + + resolved, err := LookupReachable(addr) + Expect(err).NotTo(HaveOccurred()) + Expect(resolved).To(Equal(expectedResolved)) + }) + + const ( + addr = "localhost:8888" + resolved = "[::1]:8888" + ) + + It("cache", func() { + cache := &SimpleDNSCache{} + got, ok := cache.Get(addr) + Expect(ok).To(BeFalse()) + Expect(got).To(BeEmpty()) + + cache.Add(addr, resolved) + got, ok = cache.Get(addr) + Expect(ok).To(BeTrue()) + Expect(got).To(Equal(resolved)) + }) + + It("Dialer cache miss", func() { + ctx := context.Background() + mockConn := &netmock.Conn{} + mockConn.On("RemoteAddr").Return(&net.TCPAddr{ + IP: net.IPv6loopback, + Port: 8888, + }) + cache := &netmock.DNSCache{} + cache.On("Get", addr).Return("", false) + cache.On("Add", addr, resolved) + dialer := &netmock.Dialer{} + dialer.On("DialContext", ctx, "tcp", addr).Return(mockConn, nil) + + testee := NewDNSCachingDialer(dialer, cache) + conn, err := testee.DialContext(ctx, "tcp", addr) + Expect(err).NotTo(HaveOccurred()) + Expect(conn).To(Equal(mockConn)) + + ginkgoutil.AssertExpectations(mockConn, cache, dialer) + }) + + It("Dialer cache hit", func() { + ctx := context.Background() + mockConn := &netmock.Conn{} + cache := &netmock.DNSCache{} + cache.On("Get", addr).Return(resolved, true) + dialer := &netmock.Dialer{} + dialer.On("DialContext", ctx, "tcp", resolved).Return(mockConn, nil) + + testee := NewDNSCachingDialer(dialer, cache) + conn, err := testee.DialContext(ctx, "tcp", addr) + Expect(err).NotTo(HaveOccurred()) + Expect(conn).To(Equal(mockConn)) + + ginkgoutil.AssertExpectations(mockConn, cache, dialer) + }) + + It("Dialer cache miss err", func() { + ctx := context.Background() + expectedErr := errors.New("dial failed") + cache := &netmock.DNSCache{} + cache.On("Get", addr).Return("", false) + dialer := &netmock.Dialer{} + dialer.On("DialContext", ctx, "tcp", addr).Return(nil, expectedErr) + + testee := NewDNSCachingDialer(dialer, cache) + conn, err := testee.DialContext(ctx, "tcp", addr) + Expect(err).To(Equal(expectedErr)) + Expect(conn).To(BeNil()) + + ginkgoutil.AssertExpectations(cache, dialer) + }) + +}) diff --git a/lib/tag/debug.go b/lib/tag/debug.go new file mode 100644 index 000000000..aaa7c857c --- /dev/null +++ b/lib/tag/debug.go @@ -0,0 +1,10 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// +build debug + +package tag + +const Debug = true diff --git a/lib/tag/no_degug.go b/lib/tag/no_degug.go new file mode 100644 index 000000000..0877c0d71 --- /dev/null +++ b/lib/tag/no_degug.go @@ -0,0 +1,10 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +// +build !debug + +package tag + +const Debug = false diff --git a/lib/tag/no_race.go b/lib/tag/no_race.go new file mode 100644 index 000000000..d746b261b --- /dev/null +++ b/lib/tag/no_race.go @@ -0,0 +1,10 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// +build !race + +package tag + +const Race = false diff --git a/lib/tag/race.go b/lib/tag/race.go new file mode 100644 index 000000000..a19547375 --- /dev/null +++ b/lib/tag/race.go @@ -0,0 +1,10 @@ +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +// +build race + +package tag + +const Race = true diff --git a/lib/testutil/afero.go b/lib/testutil/afero.go new file mode 100644 index 000000000..da164b382 --- /dev/null +++ b/lib/testutil/afero.go @@ -0,0 +1,35 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package testutil + +import ( + "io" + "io/ioutil" + + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func ReadString(t TestingT, r io.Reader) string { + data, err := ioutil.ReadAll(r) + require.NoError(t, err) + return string(data) +} + +func ReadFileString(t TestingT, fs afero.Fs, name string) string { + getHelper(t).Helper() + data, err := afero.ReadFile(fs, name) + require.NoError(t, err) + return string(data) + +} + +func AssertFileEqual(t TestingT, fs afero.Fs, name string, expected string) { + getHelper(t).Helper() + actual := ReadFileString(t, fs, name) + assert.Equal(t, expected, actual) +} diff --git a/lib/testutil/matchers.go b/lib/testutil/matchers.go new file mode 100644 index 000000000..cda57389c --- /dev/null +++ b/lib/testutil/matchers.go @@ -0,0 +1,88 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package testutil + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "go.uber.org/atomic" +) + +type TestingT interface { + mock.TestingT +} + +type flakyT struct { + t *testing.T + failed atomic.Bool +} + +var _ TestingT = &flakyT{} + +func (ff *flakyT) Logf(format string, args ...interface{}) { + getHelper(ff.t).Helper() + ff.Logf(format, args...) +} + +func (ff *flakyT) Errorf(format string, args ...interface{}) { + getHelper(ff.t).Helper() + ff.t.Logf(format, args...) + ff.failed.Store(true) +} + +func (ff *flakyT) FailNow() { + // WARN: that may not work in complex case. + panic("Failing now!") +} + +func RunFlaky(t *testing.T, test func(t TestingT)) { + const retries = 5 + var passed bool + for i := 1; i <= retries && !passed; i++ { + t.Run(fmt.Sprintf("Retry_%v", i), func(t *testing.T) { + if i == retries { + t.Log("Last retry.") + test(t) + return + } + ff := &flakyT{t: t} + + var hasPanic bool + func() { + defer func() { + hasPanic = recover() != nil + }() + test(ff) + }() + + passed = !ff.failed.Load() && !hasPanic + if passed { + t.Log("Passed!") + } else { + t.Log("Failed! 
Retrying.") + } + }) + } +} + +// getHelper allows to call t.Helper() without breaking compatibility with go version < 1.9 +func getHelper(t TestingT) helper { + var tInterface interface{} = t + if h, ok := tInterface.(helper); ok { + return h + } + return nopHelper{} +} + +type nopHelper struct{} + +func (nopHelper) Helper() {} + +type helper interface { + Helper() +} diff --git a/lib/testutil/matchers_test.go b/lib/testutil/matchers_test.go new file mode 100644 index 000000000..35f1565de --- /dev/null +++ b/lib/testutil/matchers_test.go @@ -0,0 +1,29 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package testutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFlakyPassed(t *testing.T) { + var run int + RunFlaky(t, func(t TestingT) { + run++ + assert.True(t, run >= 3) + }) +} + +func TestFlakyPanic(t *testing.T) { + var run int + RunFlaky(t, func(t TestingT) { + run++ + require.True(t, run >= 3) + }) +} diff --git a/lib/testutil/util.go b/lib/testutil/util.go new file mode 100644 index 000000000..d8b2be43e --- /dev/null +++ b/lib/testutil/util.go @@ -0,0 +1,27 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package testutil + +import ( + "go.uber.org/zap" +) + +func ReplaceGlobalLogger() *zap.Logger { + log := NewLogger() + zap.ReplaceGlobals(log) + zap.RedirectStdLog(log) + return log +} + +func NewLogger() *zap.Logger { + conf := zap.NewDevelopmentConfig() + conf.OutputPaths = []string{"stdout"} + log, err := conf.Build(zap.AddCaller(), zap.AddStacktrace(zap.PanicLevel)) + if err != nil { + panic(err) + } + return log +} diff --git a/lib/zaputil/stack_extract_core.go b/lib/zaputil/stack_extract_core.go new file mode 100644 index 000000000..4387012a6 --- /dev/null +++ b/lib/zaputil/stack_extract_core.go @@ -0,0 +1,164 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package zaputil + +import ( + "fmt" + + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" +) + +// NewStackExtractCore returns core that extracts stacktraces from +// error fields with github.com/pkg/errors error types, and append them to +// zapcore.Entry.Stack, on Write. +// That makes stacktraces from errors readable in case of console encoder. +// WARN(skipor): don't call Check of underlying cores, just use LevelEnabler. +// That breaks sampling and other complex logic of choosing log or not entry. 
+func NewStackExtractCore(c zapcore.Core) zapcore.Core { + return &errStackExtractCore{c, getBuffer()} +} + +type errStackExtractCore struct { + zapcore.Core + stacksBuff zapBuffer +} + +type stackedErr interface { + error + StackTrace() errors.StackTrace +} + +type causer interface { + Cause() error +} + +func (c *errStackExtractCore) With(fields []zapcore.Field) zapcore.Core { + buff := c.cloneBuffer() + fields = extractFieldsStacksToBuff(buff, fields) + return &errStackExtractCore{ + c.Core.With(fields), + buff, + } +} + +func (c *errStackExtractCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { + if c.stacksBuff.Len() == 0 && !hasStacksToExtract(fields) { + return c.Core.Write(ent, fields) + } + buff := c.cloneBuffer() + defer buff.Free() + fields = extractFieldsStacksToBuff(buff, fields) + + if ent.Stack == "" { + ent.Stack = buff.String() + } else { + // Should be rare case, so allocation is OK. + ent.Stack = ent.Stack + "\n" + buff.String() + } + return c.Core.Write(ent, fields) +} + +func (c *errStackExtractCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + // HACK(skipor): not calling Check of nested. It's ok while we use simple io/tee cores. + // But that breaks sampling logic of underlying cores, for example. + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *errStackExtractCore) cloneBuffer() zapBuffer { + clone := getBuffer() + clone.Write(c.stacksBuff.Bytes()) + return clone +} + +func hasStacksToExtract(fields []zapcore.Field) bool { + for _, field := range fields { + if field.Type != zapcore.ErrorType { + continue + } + _, ok := field.Interface.(stackedErr) + if ok { + return true + } + } + return false +} + +func extractFieldsStacksToBuff(buff zapBuffer, fields []zapcore.Field) []zapcore.Field { + var stacksFound bool + for i, field := range fields { + if field.Type != zapcore.ErrorType { + continue + } + stacked, ok := field.Interface.(stackedErr) + if !ok { + continue + } + if !stacksFound { + stacksFound = true + oldFields := fields + fields = make([]zapcore.Field, len(fields)) + copy(fields, oldFields) + } + if cause, ok := stacked.(causer); ok { + field.Interface = cause.Cause() + } else { + field = zap.String(field.Key, stacked.Error()) + } + fields[i] = field + appendStack(buff, field.Key, stacked.StackTrace()) + } + return fields // Cloned in case modifications. 
+} + +func appendStack(buff zapBuffer, key string, stack errors.StackTrace) { + if buff.Len() != 0 { + buff.AppendByte('\n') + } + buff.AppendString(key) + buff.AppendString(" stacktrace:") + stack.Format(zapBufferFmtState{buff}, 'v') +} + +type zapBuffer struct{ *buffer.Buffer } + +var _ ioStringWriter = zapBuffer{} + +type ioStringWriter interface { + WriteString(s string) (n int, err error) +} + +func (b zapBuffer) WriteString(s string) (n int, err error) { + b.AppendString(s) + return len(s), nil +} + +var bufferPool = buffer.NewPool() + +func getBuffer() zapBuffer { + return zapBuffer{bufferPool.Get()} +} + +type zapBufferFmtState struct{ zapBuffer } + +var _ fmt.State = zapBufferFmtState{} + +func (zapBufferFmtState) Flag(c int) bool { + switch c { + case '+': + return true + default: + return false + } +} + +func (zapBufferFmtState) Width() (wid int, ok bool) { panic("should not be called") } +func (zapBufferFmtState) Precision() (prec int, ok bool) { panic("should not be called") } diff --git a/lib/zaputil/stack_extract_core_test.go b/lib/zaputil/stack_extract_core_test.go new file mode 100644 index 000000000..c208c4048 --- /dev/null +++ b/lib/zaputil/stack_extract_core_test.go @@ -0,0 +1,136 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + +package zaputil + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func noStackFields1() []zapcore.Field { + return []zapcore.Field{ + zap.String("0", "0"), zap.Error(fmt.Errorf("fields 1")), + } +} + +func noStackFields2() []zapcore.Field { + return []zapcore.Field{ + zap.String("1", "1"), zap.Error(fmt.Errorf("fields 2")), + } +} + +var _ = Describe("stack_extract_core", func() { + + It("check integration", func() { + nested, logs := observer.New(zap.DebugLevel) + log := zap.New(NewStackExtractCore(nested)) + + log.Debug("test", noStackFields1()...) 
+ Expect(logs.Len()).To(Equal(1)) + entry := logs.All()[0] + Expect(entry.Message).To(Equal("test")) + Expect(entry.Context).To(Equal(noStackFields1())) + }) + + It("no stacks", func() { + nested, logs := observer.New(zap.DebugLevel) + testee := NewStackExtractCore(nested) + + testee = testee.With(noStackFields1()) + entry := zapcore.Entry{Message: "test"} + testee.Write(entry, noStackFields2()) + + Expect(logs.Len()).To(Equal(1)) + Expect(logs.All()[0]).To(Equal( + observer.LoggedEntry{entry, append(noStackFields1(), noStackFields2()...)}, + )) + }) + + It("stack in write", func() { + const sampleErrMsg = "stacked error msg" + sampleErr := errors.New(sampleErrMsg) + sampleStack := fmt.Sprintf("%+v", sampleErr.(stackedErr).StackTrace()) + + nested, logs := observer.New(zap.DebugLevel) + testee := NewStackExtractCore(nested) + + fields := append(noStackFields1(), zap.Error(sampleErr)) + fieldsCopy := make([]zapcore.Field, len(fields)) + copy(fieldsCopy, fields) + entry := zapcore.Entry{Message: "test"} + testee.Write(entry, fields) + + expectedEntry := entry + expectedEntry.Stack = "error stacktrace:" + sampleStack + Expect(logs.Len()).To(Equal(1)) + Expect(logs.All()[0]).To(Equal( + observer.LoggedEntry{ + expectedEntry, + append(noStackFields1(), zap.String("error", sampleErrMsg)), + }, + )) + Expect(fields).To(Equal(fieldsCopy)) + }) + + It("stack in with", func() { + const sampleErrMsg = "stacked error msg" + sampleCause := fmt.Errorf(sampleErrMsg) + sampleErr := errors.WithStack(sampleCause) + sampleStack := fmt.Sprintf("%+v", sampleErr.(stackedErr).StackTrace()) + + nested, logs := observer.New(zap.DebugLevel) + testee := NewStackExtractCore(nested) + + fields := append(noStackFields1(), zap.Error(sampleErr)) + fieldsCopy := make([]zapcore.Field, len(fields)) + copy(fieldsCopy, fields) + entry := zapcore.Entry{Message: "test"} + testee = testee.With(fields) + testee.Write(entry, nil) + + expectedEntry := entry + expectedEntry.Stack = "error stacktrace:" + sampleStack + Expect(logs.Len()).To(Equal(1)) + Expect(logs.All()[0]).To(Equal( + observer.LoggedEntry{ + expectedEntry, + append(noStackFields1(), zap.Error(sampleCause)), + }, + )) + Expect(fields).To(Equal(fieldsCopy)) + }) + + It("stacks join", func() { + const sampleErrMsg = "stacked error msg" + sampleErr := errors.New(sampleErrMsg) + sampleStack := fmt.Sprintf("%+v", sampleErr.(stackedErr).StackTrace()) + + nested, logs := observer.New(zap.DebugLevel) + testee := NewStackExtractCore(nested) + + const entryStack = "entry stack" + entry := zapcore.Entry{Message: "test", Stack: entryStack} + const customKey = "custom-key" + testee.Write(entry, []zapcore.Field{zap.NamedError(customKey, sampleErr)}) + + expectedEntry := entry + expectedEntry.Stack = entryStack + "\n" + customKey + " stacktrace:" + sampleStack + Expect(logs.Len()).To(Equal(1)) + Expect(logs.All()[0]).To(Equal( + observer.LoggedEntry{ + expectedEntry, + []zapcore.Field{zap.String(customKey, sampleErrMsg)}, + }, + )) + }) + +}) diff --git a/lib/zaputil/zaputil_suite_test.go b/lib/zaputil/zaputil_suite_test.go new file mode 100644 index 000000000..fbeff291d --- /dev/null +++ b/lib/zaputil/zaputil_suite_test.go @@ -0,0 +1,16 @@ +// Copyright (c) 2018 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. 
+// Author: Vladimir Skipor + +package zaputil + +import ( + "testing" + + "github.com/yandex/pandora/lib/ginkgoutil" +) + +func TestZaputilSuite(t *testing.T) { + ginkgoutil.RunSuite(t, "Zaputil Suite") +} diff --git a/limiter/batch.go b/limiter/batch.go deleted file mode 100644 index eddf7efa0..000000000 --- a/limiter/batch.go +++ /dev/null @@ -1,52 +0,0 @@ -package limiter - -import ( - "github.com/yandex/pandora/utils" - "golang.org/x/net/context" -) - -// batch implements Limiter interface -type batch struct { - limiter - master Limiter - batchSize int -} - -var _ Limiter = (*batch)(nil) - -func (bl *batch) Start(ctx context.Context) error { - defer close(bl.control) - masterCtx, cancelMaster := context.WithCancel(ctx) - masterPromise := utils.PromiseCtx(masterCtx, bl.master.Start) -loop: - for { - select { - case _, more := <-bl.master.Control(): - if !more { - break loop - } - for i := 0; i < bl.batchSize; i++ { - select { - case bl.control <- struct{}{}: - case <-ctx.Done(): - break loop - } - } - case <-ctx.Done(): - break loop - } - } - cancelMaster() - err := <-masterPromise - return err -} - -// NewBatch returns batch limiter that makes batch with size batchSize on every master tick -// master shouldn't be started -func NewBatch(batchSize int, master Limiter) (l Limiter) { - return &batch{ - limiter: limiter{make(chan struct{})}, - master: master, - batchSize: batchSize, - } -} diff --git a/limiter/batch_test.go b/limiter/batch_test.go deleted file mode 100644 index a28dcc22c..000000000 --- a/limiter/batch_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/utils" -) - -func TestBatchLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*1) - defer cancel() - - master := &limiter{control: make(chan struct{}, 2)} - for i := 0; i < 2; i++ { - master.control <- struct{}{} - } - close(master.control) - - batch := NewBatch(5, master) - promise := utils.PromiseCtx(ctx, batch.Start) - - i, err := Drain(ctx, batch) - assert.NoError(t, err) - // we should take only 5 tick from master - assert.Equal(t, i, 10) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestContextCancelInBatch(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - limitCtx, limitCancel := context.WithCancel(ctx) - limitCancel() - - master := &limiter{control: make(chan struct{}, 10)} - - batch := NewBatch(5, master) - promise := utils.PromiseCtx(limitCtx, batch.Start) - i, err := Drain(ctx, batch) - assert.NoError(t, err) - // we should take only 0 tick from master - assert.Equal(t, i, 0) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestContextCancelWhileControlBatchLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - limitCtx, limitCancel := context.WithCancel(ctx) - - master := &limiter{control: make(chan struct{})} - - batch := NewBatch(5, master) - promise := utils.PromiseCtx(limitCtx, batch.Start) - - select { - case master.control <- struct{}{}: - // we fed master and then cancel context - limitCancel() - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - 
t.Fatal(ctx.Err()) - } -} diff --git a/limiter/composite.go b/limiter/composite.go deleted file mode 100644 index b5032d1ea..000000000 --- a/limiter/composite.go +++ /dev/null @@ -1,45 +0,0 @@ -package limiter - -import ( - "fmt" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -type composite struct { - limiter - steps []Limiter -} - -var _ Limiter = (*composite)(nil) // check interface - -func (cl *composite) Start(ctx context.Context) error { - defer close(cl.control) -outer_loop: - for _, l := range cl.steps { - inner_loop: - for { - select { - case _, more := <-l.Control(): - if !more { - break inner_loop - } - select { - case cl.control <- struct{}{}: - case <-ctx.Done(): - break outer_loop - } - case <-ctx.Done(): - break outer_loop - } - } - - } - - return nil -} - -func NewCompositeFromConfig(c *config.Limiter) (l Limiter, err error) { - return nil, fmt.Errorf("Not implemented") -} diff --git a/limiter/doc.go b/limiter/doc.go deleted file mode 100644 index 307387eb6..000000000 --- a/limiter/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Limiters helps to describe different schedulers -package limiter diff --git a/limiter/limiter.go b/limiter/limiter.go deleted file mode 100644 index 0c0c62ad2..000000000 --- a/limiter/limiter.go +++ /dev/null @@ -1,47 +0,0 @@ -package limiter - -import ( - "log" - "time" - - "golang.org/x/net/context" -) - -// Limiter interface describes limiter control structure -type Limiter interface { - Start(context.Context) error - Control() <-chan struct{} -} - -// limiter helps to build more complex limiters -type limiter struct { - control chan struct{} -} - -func (l *limiter) Control() <-chan struct{} { - return l.control -} - -func (l *limiter) Start(context.Context) error { - return nil -} - -// Drain counts all ticks from limiter -func Drain(ctx context.Context, l Limiter) (int, error) { - i := 0 -loop: - for { - select { - case _, more := <-l.Control(): - log.Printf("Tick: %s", time.Now().Format("2006-01-02T15:04:05.999999")) - if !more { - log.Printf("Exit drain at: %s", time.Now().Format("2006-01-02T15:04:05.999999")) - break loop - } - i++ - case <-ctx.Done(): - return i, ctx.Err() - } - } - return i, nil -} diff --git a/limiter/limiter_test.go b/limiter/limiter_test.go deleted file mode 100644 index cf6bb9a90..000000000 --- a/limiter/limiter_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package limiter - -//func ExampleBatchLimiter() { -// ap, _ := NewLogAmmoProvider(30) -// rl, _ := NewLoggingResultListener() -// u := &User{ -// name: "Example user", -// ammunition: ap, -// results: rl, -// limiter: NewBatch(10, NewPeriodic(time.Second)), -// done: make(chan bool), -// gun: &LogGun{}, -// } -// go u.run() -// u.ammunition.Start() -// u.results.Start() -// u.limiter.Start() -// <-u.done -// -// log.Println("Done") -// // Output: -//} - -//func TestEmptyPeriodicLimiterConfig(t *testing.T) { -// lc := &config.Limiter{ -// LimiterType: "periodic", -// Parameters: nil, -// } -// l, err := NewPeriodicFromConfig(lc) -// -// if err == nil { -// t.Errorf("Should return error if empty config") -// } -// if l != nil { -// t.Errorf("Should return 'nil' if empty config") -// } -//} - -//func ExampleBatchLimiterConfig() { -// ap, err := ammo.NewLogAmmoProvider(&config.AmmoProvider{ -// AmmoLimit: 30, -// }) -// if err != nil { -// log.Panic(err) -// } -// -// lc := &config.Limiter{ -// LimiterType: "periodic", -// Parameters: map[string]interface{}{ -// "Period": 0.46, -// "BatchSize": 3.0, -// "MaxCount": 5.0, -// }, -// } -// l, err := 
NewPeriodicFromConfig(lc) -// if err != nil { -// log.Panic(err) -// } -// -// rl, err := aggregate.NewLoggingResultListener(nil) -// if err != nil { -// log.Panic(err) -// } -// -// u := &engine.User{ -// Name: "Example user", -// Ammunition: ap, -// Results: rl, -// Limiter: l, -// Done: make(chan bool), -// Gun: gun.LogGun{}, -// } -// ctx, cancel := context.WithTimeout(context.Background(), time.Second) -// defer cancel() -// -// go u.Run(ctx) -// u.Ammunition.Start(ctx) -// u.Results.Start(ctx) -// u.Limiter.Start(ctx) -// <-u.Done -// -// log.Println("Done") -// // Output: -//} diff --git a/limiter/linear.go b/limiter/linear.go deleted file mode 100644 index 25ea61788..000000000 --- a/limiter/linear.go +++ /dev/null @@ -1,116 +0,0 @@ -package limiter - -import ( - "errors" - "fmt" - "math" - "time" - - "github.com/yandex/pandora/config" - - "golang.org/x/net/context" -) - -// linear implements Limiter interface -type linear struct { - limiter - startRps float64 - endRps float64 - period time.Duration -} - -var _ Limiter = (*linear)(nil) - -func quadraticRightRoot(a, b, c float64) (float64, error) { - discriminant := math.Pow(b, 2) - 4*a*c - if discriminant < 0 { - return 0, errors.New("Discriminant is less then zero") - } - root := (-b + math.Sqrt(discriminant)) / (2 * a) - return root, nil -} - -func (l *linear) Start(ctx context.Context) error { - defer close(l.control) - a := (l.endRps - l.startRps) / l.period.Seconds() / 2.0 - b := l.startRps - maxCount := (a*math.Pow(l.period.Seconds(), 2) + b*l.period.Seconds()) - startTime := time.Now() -loop: - for n := 0.0; n < maxCount; n += 1.0 { - ts, err := quadraticRightRoot(a, b, -n) - if err != nil { - return err - } - waitPeriod := ts - time.Since(startTime).Seconds() - if waitPeriod > 0 { - select { - case <-time.After(time.Duration(waitPeriod*1e9) * time.Nanosecond): - case <-ctx.Done(): - break loop - } - } - select { - case l.control <- struct{}{}: - case <-ctx.Done(): - break loop - - } - - } - // now wait until the end of specified period - waitPeriod := l.period.Seconds() - time.Since(startTime).Seconds() - if waitPeriod > 0 { - select { - case <-time.After(time.Duration(waitPeriod*1e9) * time.Nanosecond): - case <-ctx.Done(): - } - } - return nil -} - -func NewLinear(startRps, endRps, period float64) (l Limiter) { - return &linear{ - // timer-based limiters should have big enough cache - limiter: limiter{make(chan struct{}, 65536)}, - startRps: startRps, - endRps: endRps, - period: time.Duration(period*1e9) * time.Nanosecond, - } -} - -// NewLinearFromConfig returns linear limiter -func NewLinearFromConfig(c *config.Limiter) (l Limiter, err error) { - params := c.Parameters - if params == nil { - return nil, errors.New("Parameters not specified") - } - period, ok := params["Period"] - if !ok { - return nil, errors.New("Period not specified") - } - fPeriod, ok := period.(float64) - if !ok { - return nil, fmt.Errorf("Period is of the wrong type."+ - " Expected 'float64' got '%T'", period) - } - startRps, ok := params["StartRps"] - if !ok { - return nil, fmt.Errorf("StartRps should be specified") - } - fStartRps, ok := startRps.(float64) - if !ok { - return nil, fmt.Errorf("StartRps is of the wrong type."+ - " Expected 'float64' got '%T'", startRps) - } - endRps, ok := params["EndRps"] - if !ok { - return nil, fmt.Errorf("EndRps should be specified") - } - fEndRps, ok := endRps.(float64) - if !ok { - return nil, fmt.Errorf("EndRps is of the wrong type."+ - " Expected 'float64' got '%T'", endRps) - } - return NewLinear(fStartRps, 
fEndRps, fPeriod), nil -} diff --git a/limiter/linear_test.go b/limiter/linear_test.go deleted file mode 100644 index b9479b4ba..000000000 --- a/limiter/linear_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" -) - -func TestQuadraticRightRoot(t *testing.T) { - root, err := quadraticRightRoot(1, 1, -6) - require.NoError(t, err) - assert.Equal(t, 2.0, root) -} - -func TestLinearLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - - limiter := NewLinear(5, 6, 1) - promise := utils.PromiseCtx(ctx, limiter.Start) - - ch := make(chan int, 100) - go func() { - i, err := Drain(ctx, limiter) - if err != nil { - t.Fatal(err) - } - ch <- i - }() - select { - - case i := <-ch: - assert.Equal(t, 6, i) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestEmptyLinearLimiterConfig(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "linear", - Parameters: nil, - } - l, err := NewLinearFromConfig(lc) - - if err == nil { - t.Errorf("Should return error if empty config") - } - if l != nil { - t.Errorf("Should return 'nil' if empty config") - } -} - -func TestLinearLimiterFromConfig(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*1) - defer cancel() - - lc := &config.Limiter{ - LimiterType: "linear", - Parameters: map[string]interface{}{ - "Period": 0.46, - "StartRps": 10.0, - "EndRps": 10.1, - }, - } - limiter, err := NewLinearFromConfig(lc) - - if err != nil { - t.Errorf("Got an error while creating linear limiter: %s", err) - } - if limiter == nil { - t.Errorf("Returned 'nil' with valid config") - } - switch tt := limiter.(type) { - case *linear: - default: - t.Errorf("Wrong limiter type returned (expected linear): %T", tt) - } - promise := utils.PromiseCtx(ctx, limiter.Start) - - ch := make(chan int, 100) - go func() { - i, err := Drain(ctx, limiter) - if err != nil { - t.Fatal(err) - } - ch <- i - }() - select { - - case i := <-ch: - assert.Equal(t, 5, i) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} diff --git a/limiter/periodic.go b/limiter/periodic.go deleted file mode 100644 index 43ccc381c..000000000 --- a/limiter/periodic.go +++ /dev/null @@ -1,90 +0,0 @@ -package limiter - -import ( - "errors" - "fmt" - "time" - - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -// periodic implements Limiter interface -type periodic struct { - limiter - ticker *time.Ticker -} - -var _ Limiter = (*periodic)(nil) - -func (pl *periodic) Start(ctx context.Context) error { - defer close(pl.control) - defer pl.ticker.Stop() // don't forget to stop ticker (goroutine leak possible) - // first tick just after the start - select { - case pl.control <- struct{}{}: - case <-ctx.Done(): - return nil - } -loop: - for { - select { - case <-pl.ticker.C: - select { - case pl.control <- struct{}{}: - case <-ctx.Done(): - break loop - - } - case <-ctx.Done(): - break loop - } - } - return nil -} - -func NewPeriodic(period time.Duration) (l Limiter) { - return &periodic{ - // timer-based limiters should have big enough cache - limiter: limiter{make(chan 
struct{}, 65536)}, - ticker: time.NewTicker(period), - } -} - -// NewPeriodicFromConfig returns periodic limiter -func NewPeriodicFromConfig(c *config.Limiter) (l Limiter, err error) { - params := c.Parameters - if params == nil { - return nil, errors.New("Parameters not specified") - } - period, ok := params["Period"] - if !ok { - return nil, errors.New("Period not specified") - } - switch t := period.(type) { - case float64: - l = NewPeriodic(time.Duration(period.(float64)*1e3) * time.Millisecond) - default: - return nil, fmt.Errorf("Period is of the wrong type."+ - " Expected 'float64' got '%T'", t) - } - maxCount, ok := params["MaxCount"] - if ok { - mc, ok := maxCount.(float64) - if !ok { - return nil, fmt.Errorf("MaxCount is of the wrong type."+ - " Expected 'float64' got '%T'", maxCount) - } - l = NewSize(int(mc), l) - } - batchSize, ok := params["BatchSize"] - if ok { - bs, ok := batchSize.(float64) - if !ok { - return nil, fmt.Errorf("BatchSize is of the wrong type."+ - " Expected 'float64' got '%T'", batchSize) - } - l = NewBatch(int(bs), l) - } - return l, nil -} diff --git a/limiter/periodic_test.go b/limiter/periodic_test.go deleted file mode 100644 index fcc621f65..000000000 --- a/limiter/periodic_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" -) - -func TestPeriodicLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*1) - defer cancel() - - limiterCtx, cancelLimiter := context.WithCancel(ctx) - - limiter := NewPeriodic(time.Millisecond * 2) - promise := utils.PromiseCtx(limiterCtx, limiter.Start) - - ch := make(chan int) - go func() { - i, err := Drain(ctx, limiter) - if err != nil { - t.Fatal(err) - } - ch <- i - }() - time.Sleep(time.Millisecond * 7) - cancelLimiter() - select { - - case i := <-ch: - // we should take only 4 ticks from ticker (1 in the beginning and 3 after 6 milliseconds) - assert.Equal(t, 4, i) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestEmptyPeriodicLimiterConfig(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "periodic", - Parameters: nil, - } - l, err := NewPeriodicFromConfig(lc) - - if err == nil { - t.Errorf("Should return error if empty config") - } - if l != nil { - t.Errorf("Should return 'nil' if empty config") - } -} - -func TestPeriodicLimiterNoBatch(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "periodic", - Parameters: map[string]interface{}{ - "Period": 0.46, - }, - } - l, err := NewPeriodicFromConfig(lc) - - if err != nil { - t.Errorf("Got an error while creating periodic limiter: %s", err) - } - if l == nil { - t.Errorf("Returned 'nil' with valid config") - } - switch tt := l.(type) { - case *periodic: - default: - t.Errorf("Wrong limiter type returned (expected periodicLimiter): %T", tt) - } -} - -func TestPeriodicLimiterBatch(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "periodic", - Parameters: map[string]interface{}{ - "Period": 0.46, - "BatchSize": 3.0, - }, - } - l, err := NewPeriodicFromConfig(lc) - - if err != nil { - t.Errorf("Got an error while creating periodic limiter: %s", err) - } - if l == nil { - t.Errorf("Returned 'nil' with valid config") - } - switch tt := l.(type) { - case *batch: - default: - 
t.Errorf("Wrong limiter type returned (expected batchLimiter): %T", tt) - } -} - -func TestPeriodicLimiterBatchMaxCount(t *testing.T) { - lc := &config.Limiter{ - LimiterType: "periodic", - Parameters: map[string]interface{}{ - "Period": 0.46, - "BatchSize": 3.0, - "MaxCount": 5.0, - }, - } - l, err := NewPeriodicFromConfig(lc) - - if err != nil { - t.Errorf("Got an error while creating periodic limiter: %s", err) - } - if l == nil { - t.Errorf("Returned 'nil' with valid config") - } - switch tt := l.(type) { - case *batch: - default: - t.Errorf("Wrong limiter type returned (expected batchLimiter): %T", tt) - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() - - promise := utils.PromiseCtx(ctx, l.Start) - i, err := Drain(ctx, l) - assert.NoError(t, err) - // we should take only 0 tick from master - assert.Equal(t, i, 15) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} diff --git a/limiter/size.go b/limiter/size.go deleted file mode 100644 index 04eaf445b..000000000 --- a/limiter/size.go +++ /dev/null @@ -1,52 +0,0 @@ -package limiter - -import ( - "github.com/yandex/pandora/utils" - "golang.org/x/net/context" -) - -// size implements Limiter interface -type sizeLimiter struct { - limiter - master Limiter - size int -} - -var _ Limiter = (*sizeLimiter)(nil) // check interface - -func (sl *sizeLimiter) Start(ctx context.Context) error { - defer close(sl.control) - - masterCtx, cancelMaster := context.WithCancel(ctx) - masterPromise := utils.PromiseCtx(masterCtx, sl.master.Start) - -loop: - for i := 0; i < sl.size; i++ { - select { - case v, more := <-sl.master.Control(): - if !more { - break loop - } - select { - case sl.control <- v: - case <-ctx.Done(): - break loop - } - - case <-ctx.Done(): - break loop - } - } - cancelMaster() - return <-masterPromise -} - -// NewSize returns size limiter that cuts master limiter by size -// master shouldn't be started -func NewSize(size int, master Limiter) (l Limiter) { - return &sizeLimiter{ - limiter: limiter{make(chan struct{})}, - master: master, - size: size, - } -} diff --git a/limiter/size_test.go b/limiter/size_test.go deleted file mode 100644 index d281daffd..000000000 --- a/limiter/size_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/utils" -) - -func TestSizeLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - master := &limiter{control: make(chan struct{}, 10)} - for i := 0; i < 10; i++ { - master.control <- struct{}{} - } - - size := NewSize(5, master) - promise := utils.PromiseCtx(ctx, size.Start) - - i, err := Drain(ctx, size) - assert.NoError(t, err) - // we should take only 5 tick from master - assert.Equal(t, i, 5) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestEmptySizeLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - master := &limiter{control: make(chan struct{}, 10)} - close(master.control) - - size := NewSize(5, master) - promise := utils.PromiseCtx(ctx, size.Start) - - i, err := Drain(ctx, size) - assert.NoError(t, err) - // we should take only 0 tick from master - assert.Equal(t, i, 0) - - select { - case err := <-promise: - 
require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestContextCancelInSizeLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - sizeCtx, sizeCancel := context.WithCancel(ctx) - sizeCancel() - - master := &limiter{control: make(chan struct{}, 10)} - - size := NewSize(5, master) - promise := utils.PromiseCtx(sizeCtx, size.Start) - i, err := Drain(ctx, size) - assert.NoError(t, err) - // we should take only 0 tick from master - assert.Equal(t, i, 0) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -func TestContextCancelWhileControlSizeLimiter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - sizeCtx, sizeCancel := context.WithCancel(ctx) - - master := &limiter{control: make(chan struct{})} - - size := NewSize(5, master) - promise := utils.PromiseCtx(sizeCtx, size.Start) - - select { - case master.control <- struct{}{}: - // we fed master and then cancel context - sizeCancel() - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} diff --git a/limiter/unlimited.go b/limiter/unlimited.go deleted file mode 100644 index 34ac3213f..000000000 --- a/limiter/unlimited.go +++ /dev/null @@ -1,30 +0,0 @@ -package limiter - -import ( - "github.com/yandex/pandora/config" - "golang.org/x/net/context" -) - -type unlimited struct { - limiter -} - -var _ Limiter = (*unlimited)(nil) - -func (ul *unlimited) Start(ctx context.Context) error { - defer close(ul.control) -loop: - for { - select { - case ul.control <- struct{}{}: - case <-ctx.Done(): - break loop - - } - } - return nil -} - -func NewUnlimitedFromConfig(c *config.Limiter) (l Limiter, err error) { - return &unlimited{limiter: limiter{make(chan struct{}, 64)}}, nil -} diff --git a/limiter/unlimited_test.go b/limiter/unlimited_test.go deleted file mode 100644 index c5ecdb232..000000000 --- a/limiter/unlimited_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package limiter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/yandex/pandora/config" - "github.com/yandex/pandora/utils" -) - -func TestContextCancelInUnlimited(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - limitCtx, limitCancel := context.WithCancel(ctx) - limitCancel() - - lc := &config.Limiter{ - LimiterType: "unlimited", - Parameters: nil, - } - - unlimited, err := NewUnlimitedFromConfig(lc) - assert.NoError(t, err) - promise := utils.PromiseCtx(limitCtx, unlimited.Start) - _, err = Drain(ctx, unlimited) - assert.NoError(t, err) - - select { - case err := <-promise: - require.NoError(t, err) - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} diff --git a/main.go b/main.go index b7445b42a..be760ad7b 100644 --- a/main.go +++ b/main.go @@ -1,14 +1,26 @@ -//go:generate go-extpoints +// Copyright (c) 2017 Yandex LLC. All rights reserved. +// Use of this source code is governed by a MPL 2.0 +// license that can be found in the LICENSE file. +// Author: Vladimir Skipor + package main import ( - "github.com/yandex/pandora/cmd" -) + "github.com/spf13/afero" -// If you want to extend pandora with another guns -// Just copy this main.go and import them. 
-// They should connect to extpoints automatically + "github.com/yandex/pandora/cli" + example "github.com/yandex/pandora/components/example/import" + phttp "github.com/yandex/pandora/components/phttp/import" + coreimport "github.com/yandex/pandora/core/import" +) func main() { - cmd.Run() + // CLI don't know anything about components initially. + // All extpoints constructors and default configurations should be registered, before CLI run. + fs := afero.NewOsFs() + coreimport.Import(fs) + phttp.Import(fs) + example.Import() + + cli.Run() } diff --git a/script/checkfmt.sh b/script/checkfmt.sh index c9a9d0c69..a6517fab4 100755 --- a/script/checkfmt.sh +++ b/script/checkfmt.sh @@ -5,7 +5,7 @@ test_fmt() { DIR="$1" hash goimports 2>&- || { echo >&2 "goimports not in PATH."; exit 1; } - for file in $(find -L $DIR -type f -name "*.go" -not -path "./Godeps/*") + for file in $(find -L $DIR -type f -name "*.go" -not -path "./vendor/*" -not -path "./.idea/*") do output=`cat $file | goimports -l 2>&1` if test $? -ne 0 diff --git a/script/coverage.sh b/script/coverage.sh index dd915d21c..a40d4ae37 100755 --- a/script/coverage.sh +++ b/script/coverage.sh @@ -14,7 +14,7 @@ _cd_into_top_level() { } _generate_coverage_files() { - for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d); do + for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/vendor/*' -not -path '*/mocks/*' -type d); do if ls $dir/*.go &>/dev/null ; then go test -covermode=count -coverprofile=$dir/profile.coverprofile $dir || fail=1 fi diff --git a/utils/interrupt.go b/utils/interrupt.go deleted file mode 100644 index 3611ab7d8..000000000 --- a/utils/interrupt.go +++ /dev/null @@ -1,12 +0,0 @@ -package utils - -import ( - "os" - "os/signal" -) - -func NotifyInterrupt() <-chan os.Signal { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - return c -} diff --git a/utils/multierror.go b/utils/multierror.go deleted file mode 100644 index 7792f4a97..000000000 --- a/utils/multierror.go +++ /dev/null @@ -1,107 +0,0 @@ -package utils - -// Multierror was takenfrom https://github.com/hashicorp/go-multierror - -import ( - "fmt" - "strings" -) - -// MultiError is an error type to track multiple errors. This is used to -// accumulate errors in cases and return them as a single "error". -type MultiError struct { - Errors []error - ErrorFormat ErrorFormatFunc -} - -func (e *MultiError) Error() string { - fn := e.ErrorFormat - if fn == nil { - fn = ListFormatFunc - } - - return fn(e.Errors) -} - -// ErrorOrNil returns an error interface if this Error represents -// a list of errors, or returns nil if the list of errors is empty. This -// function is useful at the end of accumulation to make sure that the value -// returned represents the existence of errors. -func (e *MultiError) ErrorOrNil() error { - if e == nil { - return nil - } - if len(e.Errors) == 0 { - return nil - } - - return e -} - -func (e *MultiError) GoString() string { - return fmt.Sprintf("*%#v", *e) -} - -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementatin of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. -// -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implementd only to -// satisfy the errwrap.Wrapper interface. 
-func (e *MultiError) WrappedErrors() []error { - return e.Errors -} - -// ErrorFormatFunc is a function callback that is called by Error to -// turn the list of errors into a string. -type ErrorFormatFunc func([]error) string - -// ListFormatFunc is a basic formatter that outputs the number of errors -// that occurred along with a bullet point list of the errors. -func ListFormatFunc(es []error) string { - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d error(s) occurred:\n\n%s", - len(es), strings.Join(points, "\n")) -} - -// AppendMulti is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. If any of the errs are multierr.Error, they will be flattened -// one level into err. -func AppendMulti(err error, errs ...error) *MultiError { - switch err := err.(type) { - case *MultiError: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(MultiError) - } - - // Go through each error and flatten - for _, e := range errs { - switch e := e.(type) { - case *MultiError: - err.Errors = append(err.Errors, e.Errors...) - default: - err.Errors = append(err.Errors, e) - } - } - - return err - default: - newErrs := make([]error, 0, len(errs)+1) - if err != nil { - newErrs = append(newErrs, err) - } - newErrs = append(newErrs, errs...) - - return AppendMulti(&MultiError{}, newErrs...) - } -} diff --git a/utils/multierror_test.go b/utils/multierror_test.go deleted file mode 100644 index 9c23bbc15..000000000 --- a/utils/multierror_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package utils - -import ( - "errors" - "reflect" - "testing" -) - -func TestError_Impl(t *testing.T) { - var _ error = new(MultiError) -} - -func TestErrorError_custom(t *testing.T) { - errors := []error{ - errors.New("foo"), - errors.New("bar"), - } - - fn := func(es []error) string { - return "foo" - } - - multi := &MultiError{Errors: errors, ErrorFormat: fn} - if multi.Error() != "foo" { - t.Fatalf("bad: %s", multi.Error()) - } -} - -func TestErrorError_default(t *testing.T) { - expected := "2 error(s) occurred:\n\n* foo\n* bar" - - errors := []error{ - errors.New("foo"), - errors.New("bar"), - } - - multi := &MultiError{Errors: errors} - if multi.Error() != expected { - t.Fatalf("bad: %s", multi.Error()) - } -} - -func TestErrorErrorOrNil(t *testing.T) { - err := new(MultiError) - if err.ErrorOrNil() != nil { - t.Fatalf("bad: %#v", err.ErrorOrNil()) - } - - err.Errors = []error{errors.New("foo")} - if v := err.ErrorOrNil(); v == nil { - t.Fatal("should not be nil") - } else if !reflect.DeepEqual(v, err) { - t.Fatalf("bad: %#v", v) - } -} - -func TestErrorWrappedErrors(t *testing.T) { - errors := []error{ - errors.New("foo"), - errors.New("bar"), - } - - multi := &MultiError{Errors: errors} - if !reflect.DeepEqual(multi.Errors, multi.WrappedErrors()) { - t.Fatalf("bad: %s", multi.WrappedErrors()) - } -} - -func TestListFormatFunc(t *testing.T) { - expected := "2 error(s) occurred:\n\n* foo\n* bar" - - errors := []error{ - errors.New("foo"), - errors.New("bar"), - } - - actual := ListFormatFunc(errors) - if actual != expected { - t.Fatalf("bad: %#v", actual) - } -} diff --git a/utils/promise.go b/utils/promise.go deleted file mode 100644 index f8c69c590..000000000 --- a/utils/promise.go +++ /dev/null @@ -1,39 +0,0 @@ -package utils - -import "golang.org/x/net/context" - -// Promise is a 
basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Promise(f PromiseFunc) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} - -// Promise is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func PromiseCtx(ctx context.Context, f func(ctx context.Context) error) chan error { - return Promise(func() error { return f(ctx) }) -} - -type Promises []chan error - -type PromiseFunc func() error - -// group asyncs and return grouped err after all async is finished -func (promises Promises) All() chan error { - // TODO: wait in select for all promises at once. Because we can possible have a deadlock right now. - return Promise(func() error { - var result *MultiError - for _, a := range promises { - err := <-a - if err != nil { - result = AppendMulti(result, err) - } - } - return result.ErrorOrNil() - }) - -}