diff --git a/.bumpversion.cfg b/.bumpversion.cfg deleted file mode 100644 index 3362d72..0000000 --- a/.bumpversion.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[bumpversion] -current_version = 1.7.0 -commit = True -tag = True - diff --git a/.travis.yml b/.travis.yml index 64a2687..43b7cba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,23 +2,21 @@ sudo: false language: go go: - - "1.10" + - "1.11" + - "1.x" +env: + - GO111MODULE=on before_install: - - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep - - chmod +x $GOPATH/bin/dep - go get -u golang.org/x/lint/golint - - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -install: - - dep ensure + - go get github.com/mattn/goveralls script: -# - golint $(go list ./... | grep -v /vendor/) +# - golint ./... - go vet ./... - - go test -cover ./... - - $HOME/gopath/bin/goveralls -service=travis-ci -ignore=example/* + - go test -covermode=count -coverprofile=profile.cov ./... + - goveralls -coverprofile=profile.cov -service=travis-ci -ignore=example/* notifications: email: false \ No newline at end of file diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 6831435..0000000 --- a/Gopkg.lock +++ /dev/null @@ -1,290 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:f11e03e8297265765272534835027251cc808ed6dab124f5f5dbc37f7c908abb" - name = "github.com/Shopify/sarama" - packages = ["."] - pruneopts = "" - revision = "ec843464b50d4c8b56403ec9d589cf41ea30e722" - version = "v1.19.0" - -[[projects]] - branch = "master" - digest = "1:c0bec5f9b98d0bc872ff5e834fac186b807b656683bd29cb82fb207a1513fabb" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - digest = "1:339bac0d398920a11fd7e8a7fbd4f133d4edd4faa1c2cbaba256d3684a266019" - name = "github.com/bradfitz/gomemcache" - packages = ["memcache"] - pruneopts = "" - revision = "bc664df9673713a0ccf26e3b55a673ec7301088b" - -[[projects]] - digest = "1:6da5545112f73dbad12895d25e39818c1c3e8040ebba488d4d3fe43bc8685eb6" - name = "github.com/bsm/sarama-cluster" - packages = ["."] - pruneopts = "" - revision = "c618e605e15c0d7535f6c96ff8efbb0dba4fd66c" - version = "v2.1.15" - -[[projects]] - digest = "1:47fe89a242ccbae03d31b4c665d3d983786acd316c0d7c51bcfa0d019b205004" - name = "github.com/cactus/go-statsd-client" - packages = ["statsd"] - pruneopts = "" - revision = "138b925ccdf617776955904ba7759fce64406cec" - version = "v3.1.1" - -[[projects]] - digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12" - name = "github.com/eapache/go-resiliency" - packages = ["breaker"] - pruneopts = "" - revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:6643c01e619a68f80ac12ad81223275df653528c6d7e3788291c1fd6f1d622f6" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - pruneopts = "" - revision = 
"776d5712da21bc4762676d614db1d8a64f4238b0" - -[[projects]] - digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3" - name = "github.com/eapache/queue" - packages = ["."] - pruneopts = "" - revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" - version = "v1.1.0" - -[[projects]] - digest = "1:52735cb54d5122db78b96f7c8980fcb91dba9a65497bae324996d1ea152ad7d4" - name = "github.com/go-redis/redis" - packages = [ - ".", - "internal", - "internal/consistenthash", - "internal/hashtag", - "internal/pool", - "internal/proto", - "internal/singleflight", - "internal/util", - ] - pruneopts = "" - revision = "f3bba01df2026fc865f7782948845db9cf44cf23" - version = "v6.14.1" - -[[projects]] - digest = "1:a01080d20c45c031c13f3828c56e58f4f51d926a482ad10cc0316225097eb7ea" - name = "github.com/go-stack/stack" - packages = ["."] - pruneopts = "" - revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" - version = "v1.8.0" - -[[projects]] - digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18" - name = "github.com/golang/protobuf" - packages = ["proto"] - pruneopts = "" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "" - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - digest = "1:9ea83adf8e96d6304f394d40436f2eb44c1dc3250d223b74088cc253a6cd0a1c" - name = "github.com/mattn/go-colorable" - packages = ["."] - pruneopts = "" - revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" - version = "v0.0.9" - -[[projects]] - digest = "1:3140e04675a6a91d2a20ea9d10bdadf6072085502e6def6768361260aee4b967" - name = "github.com/mattn/go-isatty" - packages = ["."] - pruneopts = "" - revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" - version = "v0.0.4" - -[[projects]] - digest = 
"1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:4d5b9abaf46aabf48c8a84084247b6a4e6e14dc46b1b93a56c262ac97970e3ee" - name = "github.com/msales/pkg" - packages = [ - "cache", - "log", - "stats", - ] - pruneopts = "" - revision = "5cf7c801763778eaab3421be1a824ca4a367c600" - version = "v2.4.1" - -[[projects]] - digest = "1:b1df71d0b2287062b90c6b4c8d3c934440aa0d2eb201d03f22be0f045860b4aa" - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32", - ] - pruneopts = "" - revision = "635575b42742856941dbc767b44905bb9ba083f6" - version = "v2.0.7" - -[[projects]] - digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:4142d94383572e74b42352273652c62afec5b23f325222ed09198f46009022d1" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:185cf55b1f44a1bf243558901c3f06efa5c64ba62cfdcbb1bf7bbe8c3fb68561" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:d1b5970f2a453e7c4be08117fb683b5d096bad9d17f119a6e58d4c561ca205dd" - name = "github.com/prometheus/common" - 
packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "" - revision = "bcb74de08d37a417cb6789eec1d6c810040f0470" - -[[projects]] - branch = "master" - digest = "1:1f62ed2c173c42c1edad2e94e127318ea11b0d28c62590c82a8d2d3cde189afe" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "" - revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" - -[[projects]] - branch = "master" - digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - digest = "1:711eebe744c0151a9d09af2315f0bb729b2ec7637ef4c410fa90a18ef74b65b6" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "" - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - ] - pruneopts = "" - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - branch = "master" - digest = "1:2ed0bf267e44950120acd95570227e28184573ffb099bd85b529ee148e004ddb" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "" - revision = "fa43e7bc11baaae89f3f902b2b4d832b68234844" - -[[projects]] - digest = "1:4014584c076f25aaf35d9de36c79ae2d208bba32c780f405e3395dad79292e22" - name = "gopkg.in/DATA-DOG/go-sqlmock.v1" - packages = ["."] - pruneopts = "" - revision = "d76b18b42f285b792bf985118980ce9eacea9d10" - version = "v1.3.0" - -[[projects]] - digest = "1:a4fcebba8c1b8ae25dd31f1cfa6c6ed5b1e5719ab2e9e1c18e8c98969ebde215" - name = "gopkg.in/inconshreveable/log15.v2" - packages = ["."] - pruneopts = "" - revision = "67afb5ed74ec82fd7ac8f49d27c509ac6f991970" - version = "v2.14" - -[solve-meta] - analyzer-name = "dep" - 
analyzer-version = 1 - input-imports = [ - "github.com/Shopify/sarama", - "github.com/bsm/sarama-cluster", - "github.com/msales/pkg/cache", - "github.com/msales/pkg/stats", - "github.com/pkg/errors", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "gopkg.in/DATA-DOG/go-sqlmock.v1", - "gopkg.in/inconshreveable/log15.v2", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b3cd1bb..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,23 +0,0 @@ -[[constraint]] - name = "github.com/msales/pkg" - version = "2.*" - -[[constraint]] - name = "gopkg.in/inconshreveable/log15.v2" - version = "2.11.*" - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "1.*" - -[[constraint]] - name = "github.com/bsm/sarama-cluster" - version = "2.*" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.*" - -[[constraint]] - name = "gopkg.in/DATA-DOG/go-sqlmock.v1" - version = "1.*" diff --git a/README.md b/README.md index 88cb39c..62980f8 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/msales/streams)](https://goreportcard.com/report/github.com/msales/streams) [![Build Status](https://travis-ci.org/msales/streams.svg?branch=master)](https://travis-ci.org/msales/streams) [![Coverage Status](https://coveralls.io/repos/github/msales/streams/badge.svg?branch=master)](https://coveralls.io/github/msales/streams?branch=master) +[![GoDoc](https://godoc.org/github.com/msales/streams?status.svg)](https://godoc.org/github.com/msales/streams) [![GitHub release](https://img.shields.io/github/release/msales/streams.svg)](https://github.com/msales/streams/releases) [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/msales/streams/master/LICENSE) diff --git a/cache/sink.go b/cache/sink.go index 5d2d8a7..4534da5 100644 --- a/cache/sink.go +++ b/cache/sink.go @@ 
-3,8 +3,8 @@ package cache import ( "time" - "github.com/msales/pkg/cache" - "github.com/msales/streams" + "github.com/msales/pkg/v3/cache" + "github.com/msales/streams/v2" ) // Sink represents a Cache streams sink. @@ -48,7 +48,7 @@ func (p *Sink) Process(msg *streams.Message) error { return p.pipe.Commit(msg) } - return nil + return p.pipe.Mark(msg) } // Close closes the processor. diff --git a/cache/sink_test.go b/cache/sink_test.go index e74fc70..161c454 100644 --- a/cache/sink_test.go +++ b/cache/sink_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - cache2 "github.com/msales/pkg/cache" - "github.com/msales/streams" - "github.com/msales/streams/cache" - "github.com/msales/streams/mocks" + cache2 "github.com/msales/pkg/v3/cache" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/cache" + "github.com/msales/streams/v2/mocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -23,6 +23,7 @@ func TestSink_Process(t *testing.T) { c := new(MockCache) c.On("Set", "test", "test", time.Millisecond).Return(nil) pipe := mocks.NewPipe(t) + pipe.ExpectMark("test", "test") s := cache.NewSink(c, time.Millisecond, 10) s.WithPipe(pipe) diff --git a/example/benchmark/main.go b/example/benchmark/main.go index 6b94002..9542faa 100644 --- a/example/benchmark/main.go +++ b/example/benchmark/main.go @@ -8,8 +8,8 @@ import ( "os/signal" "syscall" - "github.com/msales/pkg/stats" - "github.com/msales/streams" + "github.com/msales/pkg/v3/stats" + "github.com/msales/streams/v2" ) import _ "net/http/pprof" @@ -41,9 +41,10 @@ func main() { func task(ctx context.Context) (streams.Task, error) { builder := streams.NewStreamBuilder() builder.Source("nil-source", newNilSource(ctx)). 
- Map("do-nothing", nothingMapper) + MapFunc("do-nothing", nothingMapper) - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) @@ -62,7 +63,7 @@ func newNilSource(ctx context.Context) streams.Source { } func (s *nilSource) Consume() (*streams.Message, error) { - return streams.NewMessageWithContext(s.ctx, nil, nil), nil + return streams.NewMessageWithContext(s.ctx, nil, 1), nil } func (s *nilSource) Commit(v interface{}) error { @@ -74,7 +75,7 @@ func (s *nilSource) Close() error { } func nothingMapper(msg *streams.Message) (*streams.Message, error) { - return msg, nil + return nil, nil } func waitForSignals() chan os.Signal { diff --git a/example/branch/main.go b/example/branch/main.go index 848a845..a319a6a 100644 --- a/example/branch/main.go +++ b/example/branch/main.go @@ -4,41 +4,40 @@ import ( "context" "log" "math/rand" - "os" - "os/signal" - "syscall" - "github.com/msales/pkg/stats" - "github.com/msales/streams" - "gopkg.in/inconshreveable/log15.v2" + "github.com/msales/pkg/v3/clix" + "github.com/msales/pkg/v3/stats" + "github.com/msales/streams/v2" ) func main() { ctx := context.Background() - logger := log15.New() - logger.SetHandler(log15.LazyHandler(log15.StreamHandler(os.Stderr, log15.LogfmtFormat()))) - client, err := stats.NewStatsd("localhost:8125", "streams.example") if err != nil { - logger.Error(err.Error()) - os.Exit(1) + log.Fatal(err) } ctx = stats.WithStats(ctx, client) builder := streams.NewStreamBuilder() s := builder.Source("rand-source", newRandIntSource(ctx)). - Branch("branch", branchEvenNumberFilter, branchOddNumberFilter) + BranchFunc("branch", branchEvenNumberFilter, branchOddNumberFilter) + + sink1 := newCommitProcessor(1000) + sink2 := newCommitProcessor(1000) // Event numbers - s[0].Print("print-event") + s[0].Print("print-event"). + Process("commit-sink1", sink1) // Odd Numbers - s[1].Map("negative-mapper", negativeMapper). 
- Print("print-negative") + s[1].MapFunc("negative-mapper", negativeMapper). + Print("print-negative"). + Process("commit-sink2", sink2) - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) @@ -46,7 +45,7 @@ func main() { defer task.Close() // Wait for SIGTERM - <-waitForSignals() + <-clix.WaitForSignals() } type randIntSource struct { @@ -73,6 +72,43 @@ func (s *randIntSource) Close() error { return nil } +type commitProcessor struct { + pipe streams.Pipe + + batch int + count int +} + +func newCommitProcessor(batch int) streams.Processor { + return &commitProcessor{ + batch: batch, + } +} + +func (p *commitProcessor) WithPipe(pipe streams.Pipe) { + p.pipe = pipe +} + +func (p *commitProcessor) Process(msg *streams.Message) error { + p.count++ + + if p.count >= p.batch { + return p.pipe.Commit(msg) + } + + return p.pipe.Mark(msg) +} + +func (p *commitProcessor) Commit() error { + p.count = 0 + + return nil +} + +func (p *commitProcessor) Close() error { + return nil +} + func branchOddNumberFilter(msg *streams.Message) (bool, error) { num := msg.Value.(int) @@ -91,10 +127,3 @@ func negativeMapper(msg *streams.Message) (*streams.Message, error) { return msg, nil } - -func waitForSignals() chan os.Signal { - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - return sigs -} diff --git a/example/kafka/docker-compose.yml b/example/kafka/docker-compose.yml new file mode 100644 index 0000000..0df82e9 --- /dev/null +++ b/example/kafka/docker-compose.yml @@ -0,0 +1,23 @@ +version: "3" + +services: + zookeeper: + image: zookeeper:3.3.6 + environment: + ZOO_STANDALONE_ENABLED: "TRUE" + ports: + - '2181:2181' + restart: on-failure + + kafka: + image: wurstmeister/kafka:1.1.0 + environment: + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: 9092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_CREATE_TOPICS: 
"example1:1:1" + ports: + - '9092:9092' + depends_on: + - zookeeper + restart: on-failure \ No newline at end of file diff --git a/example/kafka/main.go b/example/kafka/main.go index c67b8cd..3f3fef4 100644 --- a/example/kafka/main.go +++ b/example/kafka/main.go @@ -4,15 +4,13 @@ import ( "context" "log" "math/rand" - "os" - "os/signal" "strconv" - "syscall" "github.com/Shopify/sarama" - "github.com/msales/pkg/stats" - "github.com/msales/streams" - "github.com/msales/streams/kafka" + "github.com/msales/pkg/v3/clix" + "github.com/msales/pkg/v3/stats" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/kafka" ) func main() { @@ -42,8 +40,7 @@ func main() { defer c.Close() defer p.Close() - // Wait for SIGTERM - <-waitForSignals() + <-clix.WaitForSignals() } func producerTask(ctx context.Context, brokers []string, c *sarama.Config) (streams.Task, error) { @@ -60,10 +57,11 @@ func producerTask(ctx context.Context, brokers []string, c *sarama.Config) (stre builder := streams.NewStreamBuilder() builder.Source("rand-source", newRandIntSource(ctx)). - Map("to-string", stringMapper). + MapFunc("to-string", stringMapper). Process("kafka-sink", sink) - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) @@ -76,7 +74,7 @@ func consumerTask(ctx context.Context, brokers []string, c *sarama.Config) (stre config.Config = *c config.Brokers = brokers config.Topic = "example1" - config.GroupId = "example-consumer" + config.GroupID = "example-consumer" config.ValueDecoder = kafka.StringDecoder{} config.Ctx = ctx @@ -85,12 +83,15 @@ func consumerTask(ctx context.Context, brokers []string, c *sarama.Config) (stre return nil, err } + sink := newCommitProcessor(1000) + builder := streams.NewStreamBuilder() builder.Source("kafka-source", src). - Map("to-int", intMapper). - Print("print") + MapFunc("to-int", intMapper). 
+ Process("commit-sink", sink) - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) @@ -141,9 +142,39 @@ func intMapper(msg *streams.Message) (*streams.Message, error) { return msg, nil } -func waitForSignals() chan os.Signal { - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) +type commitProcessor struct { + pipe streams.Pipe + + batch int + count int +} + +func newCommitProcessor(batch int) streams.Processor { + return &commitProcessor{ + batch: batch, + } +} + +func (p *commitProcessor) WithPipe(pipe streams.Pipe) { + p.pipe = pipe +} + +func (p *commitProcessor) Process(msg *streams.Message) error { + p.count++ - return sigs + if p.count >= p.batch { + return p.pipe.Commit(msg) + } + + return p.pipe.Mark(msg) +} + +func (p *commitProcessor) Commit() error { + p.count = 0 + + return nil +} + +func (p *commitProcessor) Close() error { + return nil } diff --git a/example/merge/main.go b/example/merge/main.go index fa16c28..9252510 100644 --- a/example/merge/main.go +++ b/example/merge/main.go @@ -7,22 +7,23 @@ import ( "os/signal" "syscall" - "github.com/msales/streams" + "github.com/msales/streams/v2" ) func main() { builder := streams.NewStreamBuilder() stream1 := builder.Source("rand1-source", newRandIntSource()). - Filter("filter1", lowNumberFilter) + FilterFunc("filter1", lowNumberFilter) builder.Source("rand2-source", newRandIntSource()). - Filter("filter2", highNumberFilter). - Map("add-hundedred-mapper", addHundredMapper). + FilterFunc("filter2", highNumberFilter). + MapFunc("add-hundedred-mapper", addHundredMapper). Merge("merge", stream1). 
Print("print") - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) diff --git a/example/simple/main.go b/example/simple/main.go index 07f92a8..c20f015 100644 --- a/example/simple/main.go +++ b/example/simple/main.go @@ -7,17 +7,18 @@ import ( "os/signal" "syscall" - "github.com/msales/streams" + "github.com/msales/streams/v2" ) func main() { builder := streams.NewStreamBuilder() builder.Source("rand-source", newRandIntSource()). - Filter("odd-filter", oddNumberFilter). - Map("double-mapper", doubleMapper). + FilterFunc("odd-filter", oddNumberFilter). + MapFunc("double-mapper", doubleMapper). Print("print") - task := streams.NewTask(builder.Build()) + tp, _ := builder.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { log.Fatal(err.Error()) }) diff --git a/fakes_test.go b/fakes_test.go new file mode 100644 index 0000000..5a30a5f --- /dev/null +++ b/fakes_test.go @@ -0,0 +1,121 @@ +package streams_test + +import ( + "sync" + + "github.com/msales/streams/v2" +) + +type fakeSource struct { + Key interface{} + Value interface{} +} + +func (s *fakeSource) Consume() (*streams.Message, error) { + return streams.NewMessage(s.Key, s.Value), nil +} + +func (s *fakeSource) Commit(interface{}) error { + return nil +} + +func (s *fakeSource) Close() error { + return nil +} + +type fakeCommitter struct{} + +func (*fakeCommitter) WithPipe(streams.Pipe) {} + +func (*fakeCommitter) Process(*streams.Message) error { + return nil +} + +func (*fakeCommitter) Commit() error { + return nil +} + +func (*fakeCommitter) Close() error { + return nil +} + +type fakeMetadata struct{} + +func (m *fakeMetadata) WithOrigin(streams.MetadataOrigin) { +} + +func (m *fakeMetadata) Update(streams.Metadata) streams.Metadata { + return m +} + +func (m *fakeMetadata) Merge(v streams.Metadata) streams.Metadata { + return m +} + +type fakeMetastore struct { + Metadata 
map[streams.Processor]streams.Metaitems +} + +func (ms *fakeMetastore) Pull(p streams.Processor) (streams.Metaitems, error) { + return ms.Metadata[p], nil +} + +func (ms *fakeMetastore) PullAll() (map[streams.Processor]streams.Metaitems, error) { + return ms.Metadata, nil +} + +func (ms *fakeMetastore) Mark(streams.Processor, streams.Source, streams.Metadata) error { + return nil +} + +type fakePump struct { + sync.Mutex +} + +func (*fakePump) Accept(*streams.Message) error { + return nil +} + +func (*fakePump) Stop() { + +} + +func (*fakePump) Close() error { + return nil +} + +type fakeNode struct { + Proc streams.Processor +} + +func (*fakeNode) Name() string { + return "" +} + +func (*fakeNode) AddChild(n streams.Node) {} + +func (*fakeNode) Children() []streams.Node { + return []streams.Node{} +} + +func (n *fakeNode) Processor() streams.Processor { + return n.Proc +} + +type fakeSupervisor struct{} + +func (*fakeSupervisor) WithPumps(pumps map[streams.Node]streams.Pump) { + +} + +func (*fakeSupervisor) Start() error { + return nil +} + +func (*fakeSupervisor) Commit(streams.Processor) error { + return nil +} + +func (*fakeSupervisor) Close() error { + return nil +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..5f3f073 --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module github.com/msales/streams/v2 + +require ( + github.com/Shopify/sarama v1.19.0 + github.com/Shopify/toxiproxy v2.1.3+incompatible // indirect + github.com/bsm/sarama-cluster v2.1.15+incompatible + github.com/eapache/go-resiliency v1.1.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect + github.com/msales/pkg/v3 v3.1.0 + github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742 // indirect + github.com/pkg/errors v0.8.0 + github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect + 
github.com/stretchr/testify v1.2.2 + gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..f110ad2 --- /dev/null +++ b/go.sum @@ -0,0 +1,99 @@ +github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.3+incompatible h1:awiJqUYH4q4OmoBiRccJykjd7B+w0loJi2keSna4X/M= +github.com/Shopify/toxiproxy v2.1.3+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g= +github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= +github.com/bsm/sarama-cluster v2.1.15+incompatible h1:RkV6WiNRnqEEbp81druK8zYhmnIgdOjqSVi0+9Cnl2A= +github.com/bsm/sarama-cluster v2.1.15+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/cactus/go-statsd-client v3.1.1+incompatible h1:p97okCU2aaeSxQ6KzMdGEwQkiGBMys71/J0XWoirbJY= +github.com/cactus/go-statsd-client v3.1.1+incompatible/go.mod h1:cMRcwZDklk7hXp+Law83urTHUiHMzCev/r4JMYr/zU0= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0= +github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/msales/logged v0.2.0 h1:amLuKkHy6vLCoV2PWKUK3uIinRyedZhsvYLkvU87u+A= +github.com/msales/logged v0.2.0/go.mod h1:JxxBSh+kSlxSYZD+hGTPIijsxoqKhY9qNCpti1dhp4s= +github.com/msales/pkg/v3 v3.1.0 h1:aEYHKzmd/jUU0f+Hs3hNC5Xac4h9kXrOCn33s+FiXw8= +github.com/msales/pkg/v3 v3.1.0/go.mod h1:WlfgoRj+X6d3VByslgMwu3d4HFRY9LMdp8fRPfAm7TM= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742 h1:wKfigKMTgvSzBLIVvB5QaBBQI0odU6n45/UKSphjLus= +github.com/pierrec/lz4 v0.0.0-20181005164709-635575b42742/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= 
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380 h1:zPQexyRtNYBc7bcHmehl1dH6TB3qn8zytv8cBGLDNY0= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181210030007-2a47403f2ae5 h1:SlFRMb9PEnqzqnBRCynVOhxv4vHjB2lnIoxK6p5nzFM= +golang.org/x/sys v0.0.0-20181210030007-2a47403f2ae5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= +gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/kafka/encoder_test.go b/kafka/encoder_test.go new file mode 100644 index 0000000..5b3ff28 --- /dev/null +++ 
b/kafka/encoder_test.go @@ -0,0 +1,78 @@ +package kafka_test + +import ( + "testing" + + "github.com/msales/streams/v2/kafka" + "github.com/stretchr/testify/assert" +) + +func TestByteDecoder_Decode(t *testing.T) { + in := []byte("foobar") + dec := kafka.ByteDecoder{} + + got, err := dec.Decode(in) + + assert.NoError(t, err) + assert.Equal(t, []byte("foobar"), got) +} + +func TestByteEncoder_Encode(t *testing.T) { + tests := []struct { + in interface{} + want []byte + }{ + { + in: []byte("foobar"), + want: []byte("foobar"), + }, + { + in: nil, + want: nil, + }, + } + + for _, tt := range tests { + enc := kafka.ByteEncoder{} + + got, err := enc.Encode(tt.in) + + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } +} + +func TestStringDecoder_Decode(t *testing.T) { + in := []byte("foobar") + dec := kafka.StringDecoder{} + + got, err := dec.Decode(in) + + assert.NoError(t, err) + assert.Equal(t, "foobar", got) +} + +func TestStringEncoder_Encode(t *testing.T) { + tests := []struct { + in interface{} + want []byte + }{ + { + in: "foobar", + want: []byte("foobar"), + }, + { + in: nil, + want: nil, + }, + } + + for _, tt := range tests { + enc := kafka.StringEncoder{} + + got, err := enc.Encode(tt.in) + + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } +} diff --git a/kafka/sink.go b/kafka/sink.go index 0ffbb1c..a342404 100644 --- a/kafka/sink.go +++ b/kafka/sink.go @@ -2,7 +2,7 @@ package kafka import ( "github.com/Shopify/sarama" - "github.com/msales/streams" + "github.com/msales/streams/v2" ) // SinkConfig represents the configuration of a Sink. @@ -122,16 +122,21 @@ func (p *Sink) Process(msg *streams.Message) error { p.count++ if p.count >= p.batch { - if err := p.producer.SendMessages(p.buf); err != nil { - return err - } + return p.pipe.Commit(msg) + } - p.count = 0 - p.buf = p.buf[:0] + return p.pipe.Mark(msg) +} - return p.pipe.Commit(msg) +//Commit commits a processors batch. 
+func (p *Sink) Commit() error { + if err := p.producer.SendMessages(p.buf); err != nil { + return err } + p.count = 0 + p.buf = p.buf[:0] + return nil } diff --git a/kafka/sink_internal_test.go b/kafka/sink_internal_test.go new file mode 100644 index 0000000..6d3fdff --- /dev/null +++ b/kafka/sink_internal_test.go @@ -0,0 +1,39 @@ +package kafka + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/msales/streams/v2" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestSink_ConsumeReturnsKeyEncodeError(t *testing.T) { + s := Sink{ + keyEncoder: errorEncoder{}, + buf: []*sarama.ProducerMessage{}, + } + + err := s.Process(streams.NewMessage("foo", "foo")) + + assert.Error(t, err) +} + +func TestSink_ConsumeReturnsValueEncodeError(t *testing.T) { + s := Sink{ + keyEncoder: StringEncoder{}, + valueEncoder: errorEncoder{}, + buf: []*sarama.ProducerMessage{}, + } + + err := s.Process(streams.NewMessage("foo", "foo")) + + assert.Error(t, err) +} + +type errorEncoder struct{} + +func (errorEncoder) Encode(interface{}) ([]byte, error) { + return nil, errors.New("test") +} diff --git a/kafka/sink_test.go b/kafka/sink_test.go new file mode 100644 index 0000000..5dd993f --- /dev/null +++ b/kafka/sink_test.go @@ -0,0 +1,238 @@ +package kafka_test + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/kafka" + "github.com/msales/streams/v2/mocks" + "github.com/stretchr/testify/assert" +) + +func TestNewSinkConfig(t *testing.T) { + c := kafka.NewSinkConfig() + + assert.IsType(t, &kafka.SinkConfig{}, c) +} + +func TestSinkConfig_Validate(t *testing.T) { + c := kafka.NewSinkConfig() + c.Brokers = []string{"test"} + + err := c.Validate() + + assert.NoError(t, err) +} + +func TestSinkConfig_ValidateErrors(t *testing.T) { + tests := []struct { + name string + cfg func(*kafka.SinkConfig) + err string + }{ + { + name: "Brokers", + cfg: func(c *kafka.SinkConfig) { + c.Brokers = 
[]string{} + }, + err: "Brokers must have at least one broker", + }, + { + name: "KeyEncoder", + cfg: func(c *kafka.SinkConfig) { + c.Brokers = []string{"test"} + c.KeyEncoder = nil + }, + err: "KeyEncoder must be an instance of Encoder", + }, + { + name: "ValueEncoder", + cfg: func(c *kafka.SinkConfig) { + c.Brokers = []string{"test"} + c.ValueEncoder = nil + }, + err: "ValueEncoder must be an instance of Encoder", + }, + { + name: "BufferSize", + cfg: func(c *kafka.SinkConfig) { + c.Brokers = []string{"test"} + c.BatchSize = 0 + }, + err: "BatchSize must be at least 1", + }, + { + name: "BaseConfig", + cfg: func(c *kafka.SinkConfig) { + c.Brokers = []string{"test"} + c.Metadata.Retry.Max = -1 + }, + err: "Metadata.Retry.Max must be >= 0", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := kafka.NewSinkConfig() + tt.cfg(c) + + err := c.Validate() + + assert.Equal(t, tt.err, string(err.(sarama.ConfigurationError))) + }) + } +} + +func TestNewSink(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). 
+ SetLeader("test_topic", 0, broker0.BrokerID()), + }) + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + + p, err := kafka.NewSink(c) + + assert.NoError(t, err) + assert.IsType(t, &kafka.Sink{}, p) +} + +func TestNewSink_Error(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + broker0.Close() + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + + _, err := kafka.NewSink(c) + + assert.Error(t, err) +} + +func TestNewSink_ValidatesConfig(t *testing.T) { + c := kafka.NewSinkConfig() + + _, err := kafka.NewSink(c) + + assert.Error(t, err) +} + +func TestSink_Process(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + }) + + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.ValueEncoder = kafka.StringEncoder{} + + p, _ := kafka.NewSink(c) + defer p.Close() + + pipe := mocks.NewPipe(t) + pipe.ExpectMark(nil, "foo") + p.WithPipe(pipe) + + err := p.Process(streams.NewMessage(nil, "foo")) + + assert.NoError(t, err) +} + +func TestSink_ProcessCommits(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). 
+ SetLeader("test_topic", 0, broker0.BrokerID()), + }) + + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.ValueEncoder = kafka.StringEncoder{} + c.BatchSize = 1 + + p, _ := kafka.NewSink(c) + defer p.Close() + + pipe := mocks.NewPipe(t) + pipe.ExpectCommit() + p.WithPipe(pipe) + + err := p.Process(streams.NewMessage(nil, "foo")) + + assert.NoError(t, err) +} + +func TestSink_Commit(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "ProduceRequest": sarama.NewMockProduceResponse(t), + }) + + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.KeyEncoder = kafka.StringEncoder{} + c.ValueEncoder = kafka.StringEncoder{} + c.BatchSize = 1 + + p, _ := kafka.NewSink(c) + defer p.Close() + + pipe := mocks.NewPipe(t) + pipe.ExpectCommit() + p.WithPipe(pipe) + + p.Process(streams.NewMessage("foo", "foo")) + + err := p.Commit() + + assert.NoError(t, err) +} + +func TestSink_CommitError(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "ProduceRequest": sarama.NewMockProduceResponse(t). 
+ SetError("test_topic", 0, sarama.ErrBrokerNotAvailable), + }) + + c := kafka.NewSinkConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.ValueEncoder = kafka.StringEncoder{} + c.BatchSize = 1 + + p, _ := kafka.NewSink(c) + defer p.Close() + + pipe := mocks.NewPipe(t) + pipe.ExpectCommit() + p.WithPipe(pipe) + + p.Process(streams.NewMessage(nil, "foo")) + + err := p.Commit() + + assert.Error(t, err) +} diff --git a/kafka/source.go b/kafka/source.go index 8f8c37d..52a8b1c 100644 --- a/kafka/source.go +++ b/kafka/source.go @@ -6,7 +6,7 @@ import ( "github.com/Shopify/sarama" "github.com/bsm/sarama-cluster" - "github.com/msales/streams" + "github.com/msales/streams/v2" "github.com/pkg/errors" ) @@ -16,7 +16,7 @@ type SourceConfig struct { Brokers []string Topic string - GroupId string + GroupID string Ctx context.Context KeyDecoder Decoder @@ -25,12 +25,6 @@ type SourceConfig struct { BufferSize int } -type marker struct { - Topic string - Partition int32 - Offset int64 -} - // NewSourceConfig creates a new Kafka source configuration. func NewSourceConfig() *SourceConfig { c := &SourceConfig{ @@ -66,15 +60,88 @@ func (c *SourceConfig) Validate() error { return nil } +// Metadata represents an the kafka topic metadata. +type Metadata []*PartitionOffset + +func (m Metadata) find(topic string, partition int32) (int, *PartitionOffset) { + for i, pos := range m { + if pos.Topic == topic && pos.Partition == partition { + return i, pos + } + } + + return -1, nil +} + +// WithOrigin sets the MetadataOrigin on the metadata. +func (m Metadata) WithOrigin(o streams.MetadataOrigin) { + for _, pos := range m { + pos.Origin = o + } +} + +// Update updates the given metadata with the contained metadata. 
+func (m Metadata) Update(v streams.Metadata) streams.Metadata { + if v == nil { + return m + } + + metadata := v.(Metadata) + for _, newPos := range m { + i, oldPos := metadata.find(newPos.Topic, newPos.Partition) + if oldPos == nil { + metadata = append(metadata, newPos) + continue + } + + if newPos.Offset > oldPos.Offset { + metadata[i] = newPos + } + } + + return metadata +} + +// Merge merges the contained metadata into the given the metadata. +func (m Metadata) Merge(v streams.Metadata) streams.Metadata { + if v == nil { + return m + } + + metadata := v.(Metadata) + for _, newPos := range m { + i, oldPos := metadata.find(newPos.Topic, newPos.Partition) + if oldPos == nil { + metadata = append(metadata, newPos) + continue + } + + if (newPos.Origin == oldPos.Origin && newPos.Offset < oldPos.Offset) || (newPos.Origin < oldPos.Origin) { + metadata[i] = newPos + } + } + + return metadata +} + +// PartitionOffset represents the position in the stream of a message. +type PartitionOffset struct { + Origin streams.MetadataOrigin + + Topic string + Partition int32 + Offset int64 +} + // Source represents a Kafka stream source. 
type Source struct { + topic string consumer *cluster.Consumer ctx context.Context keyDecoder Decoder valueDecoder Decoder - state map[string]map[int32]int64 buf chan *sarama.ConsumerMessage lastErr error } @@ -89,18 +156,18 @@ func NewSource(c *SourceConfig) (*Source, error) { cc.Config = c.Config cc.Consumer.Return.Errors = true - consumer, err := cluster.NewConsumer(c.Brokers, c.GroupId, []string{c.Topic}, cc) + consumer, err := cluster.NewConsumer(c.Brokers, c.GroupID, []string{c.Topic}, cc) if err != nil { return nil, err } s := &Source{ + topic: c.Topic, consumer: consumer, ctx: c.Ctx, keyDecoder: c.KeyDecoder, valueDecoder: c.ValueDecoder, buf: make(chan *sarama.ConsumerMessage, c.BufferSize), - state: make(map[string]map[int32]int64), } go s.readErrors() @@ -128,7 +195,7 @@ func (s *Source) Consume() (*streams.Message, error) { } m := streams.NewMessageWithContext(s.ctx, k, v). - WithMetadata(s, s.markState(msg)) + WithMetadata(s, s.createMetadata(msg)) return m, nil case <-time.After(100 * time.Millisecond): @@ -138,13 +205,17 @@ func (s *Source) Consume() (*streams.Message, error) { // Commit marks the consumed records as processed. 
func (s *Source) Commit(v interface{}) error { - state := v.([]marker) - for _, m := range state { - s.consumer.MarkPartitionOffset(m.Topic, m.Partition, m.Offset, "") + if v == nil { + return nil + } + + state := v.(Metadata) + for _, pos := range state { + s.consumer.MarkPartitionOffset(pos.Topic, pos.Partition, pos.Offset, "") } if err := s.consumer.CommitOffsets(); err != nil { - return errors.Wrap(err, "streams: could not commit kafka offset") + return errors.Wrap(err, "kafka: could not commit kafka offset") } return nil @@ -155,23 +226,12 @@ func (s *Source) Close() error { return s.consumer.Close() } -func (s *Source) markState(msg *sarama.ConsumerMessage) []marker { - partitions, ok := s.state[msg.Topic] - if !ok { - partitions = make(map[int32]int64) - s.state[msg.Topic] = partitions - } - - partitions[msg.Partition] = msg.Offset - - var markers []marker - for topic, partitions := range s.state { - for partition, offset := range partitions { - markers = append(markers, marker{Topic: topic, Partition: partition, Offset: offset}) - } - } - - return markers +func (s *Source) createMetadata(msg *sarama.ConsumerMessage) Metadata { + return Metadata{&PartitionOffset{ + Topic: msg.Topic, + Partition: msg.Partition, + Offset: msg.Offset, + }} } func (s *Source) readErrors() { diff --git a/kafka/source_internal_test.go b/kafka/source_internal_test.go new file mode 100644 index 0000000..54cdc53 --- /dev/null +++ b/kafka/source_internal_test.go @@ -0,0 +1,71 @@ +package kafka + +import ( + "testing" + + "github.com/Shopify/sarama" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestSource_ConsumeReturnsLastError(t *testing.T) { + want := errors.New("test") + s := Source{ + lastErr: want, + } + + _, err := s.Consume() + + assert.Equal(t, want, err) +} + +func TestSource_ConsumeReturnsKeyDecodeError(t *testing.T) { + s := Source{ + keyDecoder: errorDecoder{}, + buf: make(chan *sarama.ConsumerMessage, 1), + } + + s.buf <- 
&sarama.ConsumerMessage{ + Key: []byte(nil), + Value: []byte("foo"), + } + + _, err := s.Consume() + + assert.Error(t, err) +} + +func TestSource_ConsumeReturnsValueDecodeError(t *testing.T) { + s := Source{ + keyDecoder: ByteDecoder{}, + valueDecoder: errorDecoder{}, + buf: make(chan *sarama.ConsumerMessage, 1), + } + + s.buf <- &sarama.ConsumerMessage{ + Key: []byte(nil), + Value: []byte("foo"), + } + + _, err := s.Consume() + + assert.Error(t, err) +} + +func TestSource_ConsumeTimesOut(t *testing.T) { + s := Source{ + buf: make(chan *sarama.ConsumerMessage, 1), + } + + msg, err := s.Consume() + + assert.NoError(t, err) + assert.Equal(t, nil, msg.Key) + assert.Equal(t, nil, msg.Value) +} + +type errorDecoder struct{} + +func (errorDecoder) Decode([]byte) (interface{}, error) { + return nil, errors.New("test") +} diff --git a/kafka/source_test.go b/kafka/source_test.go new file mode 100644 index 0000000..60a9f00 --- /dev/null +++ b/kafka/source_test.go @@ -0,0 +1,478 @@ +package kafka_test + +import ( + "testing" + "time" + + "github.com/Shopify/sarama" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/kafka" + "github.com/stretchr/testify/assert" +) + +func TestNewSourceConfig(t *testing.T) { + c := kafka.NewSourceConfig() + + assert.IsType(t, &kafka.SourceConfig{}, c) +} + +func TestSourceConfig_Validate(t *testing.T) { + c := kafka.NewSourceConfig() + c.Brokers = []string{"test"} + + err := c.Validate() + + assert.NoError(t, err) +} + +func TestSourceConfig_ValidateErrors(t *testing.T) { + tests := []struct { + name string + cfg func(*kafka.SourceConfig) + err string + }{ + { + name: "Brokers", + cfg: func(c *kafka.SourceConfig) { + c.Brokers = []string{} + }, + err: "Brokers must have at least one broker", + }, + { + name: "KeyDecoder", + cfg: func(c *kafka.SourceConfig) { + c.Brokers = []string{"test"} + c.KeyDecoder = nil + }, + err: "KeyDecoder must be an instance of Decoder", + }, + { + name: "ValueDecoder", + cfg: func(c 
*kafka.SourceConfig) { + c.Brokers = []string{"test"} + c.ValueDecoder = nil + }, + err: "ValueDecoder must be an instance of Decoder", + }, + { + name: "BufferSize", + cfg: func(c *kafka.SourceConfig) { + c.Brokers = []string{"test"} + c.BufferSize = 0 + }, + err: "BufferSize must be at least 1", + }, + { + name: "BaseConfig", + cfg: func(c *kafka.SourceConfig) { + c.Brokers = []string{"test"} + c.Metadata.Retry.Max = -1 + }, + err: "Metadata.Retry.Max must be >= 0", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := kafka.NewSourceConfig() + tt.cfg(c) + + err := c.Validate() + + assert.Equal(t, tt.err, string(err.(sarama.ConfigurationError))) + }) + } +} + +func TestMetadata_WithOrigin(t *testing.T) { + meta := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + + meta.WithOrigin(streams.CommitterOrigin) + + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3, Origin: streams.CommitterOrigin}}, meta) +} + +func TestMetadata_Update(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 2}} + + res := meta1.Update(meta2) + + assert.IsType(t, kafka.Metadata{}, res) + meta1 = res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}}, meta1) +} + +func TestMetadata_UpdatePicksHighest(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 10}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + + res := meta1.Update(meta2) + + assert.IsType(t, kafka.Metadata{}, res) + merged := res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 10}}, merged) +} + +func TestMetadata_UpdateNilMerged(t *testing.T) { + meta := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + + res := meta.Update(nil) + + assert.IsType(t, kafka.Metadata{}, res) + merged := res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: 
"foo", Partition: 0, Offset: 3}}, merged) +} + +func TestMetadata_UpdateNewPartition(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 10}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 1, Offset: 3}} + + res := meta2.Update(meta1) + + assert.IsType(t, kafka.Metadata{}, res) + merged := res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 10}, {Topic: "foo", Partition: 1, Offset: 3}}, merged) +} + +func TestMetadata_Merge(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 1, Offset: 2}} + + res := meta2.Merge(meta1) + + assert.IsType(t, kafka.Metadata{}, res) + meta1 = res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}, {Topic: "foo", Partition: 1, Offset: 2}}, meta1) +} + +func TestMetadata_MergeTakesCommitterOverProcessor(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 2, Origin: streams.ProcessorOrigin}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3, Origin: streams.CommitterOrigin}} + + res := meta2.Merge(meta1) + + assert.IsType(t, kafka.Metadata{}, res) + meta1 = res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3, Origin: streams.CommitterOrigin}}, meta1) +} + +func TestMetadata_MergeTakesLowestWhenTheSameOrigin(t *testing.T) { + meta1 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3, Origin: streams.ProcessorOrigin}} + meta2 := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 2, Origin: streams.ProcessorOrigin}} + + res := meta2.Merge(meta1) + + assert.IsType(t, kafka.Metadata{}, res) + meta1 = res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 2, Origin: streams.ProcessorOrigin}}, meta1) +} + +func TestMetadata_MergeNilMerged(t *testing.T) { + b := kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}} + + res := 
b.Merge(nil) + + assert.IsType(t, kafka.Metadata{}, res) + a := res.(kafka.Metadata) + assert.Equal(t, kafka.Metadata{{Topic: "foo", Partition: 0, Offset: 3}}, a) +} + +func BenchmarkMetadata_Merge(b *testing.B) { + var meta streams.Metadata = kafka.Metadata{{Topic: "test", Partition: 1, Offset: 2}} + other := kafka.Metadata{{Topic: "test", Partition: 2, Offset: 2}} + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + meta = other.Update(meta) + } +} + +func TestNewSource(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t). + SetCoordinator(sarama.CoordinatorGroup, "test_group", broker0), + "JoinGroupRequest": sarama.NewMockWrapper(&sarama.JoinGroupResponse{ + Version: 1, + Err: sarama.ErrNoError, + GroupProtocol: "protocol", + }), + "SyncGroupRequest": sarama.NewMockWrapper(&sarama.SyncGroupResponse{ + Err: sarama.ErrNoError, + MemberAssignment: []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 10, 't', 'e', 's', 't', '_', 't', 'o', 'p', 'i', 'c', // Topic one + 0, 0, 0, 1, // Topic one, partition array length + 0, 0, 0, 0, // 0 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + }, + }), + "OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t), + "LeaveGroupRequest": sarama.NewMockWrapper(&sarama.LeaveGroupResponse{ + Err: sarama.ErrNoError, + }), + }) + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + + s, err := kafka.NewSource(c) + + time.Sleep(100 * time.Millisecond) + + assert.NoError(t, err) + assert.IsType(t, &kafka.Source{}, s) + + if s != nil { + s.Close() + } +} + +func TestNewSource_ValidatesConfig(t *testing.T) { + c := 
kafka.NewSourceConfig() + + _, err := kafka.NewSource(c) + + assert.Error(t, err) +} + +func TestNewSource_Error(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + broker0.Close() + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + + _, err := kafka.NewSource(c) + + assert.Error(t, err) +} + +func TestSource_Consume(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t). + SetCoordinator(sarama.CoordinatorGroup, "test_group", broker0), + "JoinGroupRequest": sarama.NewMockWrapper(&sarama.JoinGroupResponse{ + Version: 1, + Err: sarama.ErrNoError, + GroupProtocol: "protocol", + }), + "SyncGroupRequest": sarama.NewMockWrapper(&sarama.SyncGroupResponse{ + Err: sarama.ErrNoError, + MemberAssignment: []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 10, 't', 'e', 's', 't', '_', 't', 'o', 'p', 'i', 'c', // Topic one + 0, 0, 0, 1, // Topic one, partition array length + 0, 0, 0, 0, // 0 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + }, + }), + "OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t). + SetOffset("test_group", "test_topic", 0, 10, "", sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset("test_topic", 0, sarama.OffsetNewest, 10). + SetOffset("test_topic", 0, sarama.OffsetOldest, 7), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage("test_topic", 0, 10, sarama.StringEncoder("foo")). + SetMessage("test_topic", 0, 11, sarama.StringEncoder("bar")). + SetMessage("test_topic", 0, 12, sarama.StringEncoder("baz")). 
+ SetHighWaterMark("test_topic", 0, 14), + "LeaveGroupRequest": sarama.NewMockWrapper(&sarama.LeaveGroupResponse{ + Err: sarama.ErrNoError, + }), + }) + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + s, _ := kafka.NewSource(c) + defer s.Close() + + time.Sleep(100 * time.Millisecond) + + msg, err := s.Consume() + + assert.NoError(t, err) + assert.Equal(t, []byte(nil), msg.Key) + assert.Equal(t, []byte("foo"), msg.Value) +} + +func TestSource_Commit(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t). + SetCoordinator(sarama.CoordinatorGroup, "test_group", broker0), + "JoinGroupRequest": sarama.NewMockWrapper(&sarama.JoinGroupResponse{ + Version: 1, + Err: sarama.ErrNoError, + GroupProtocol: "protocol", + }), + "SyncGroupRequest": sarama.NewMockWrapper(&sarama.SyncGroupResponse{ + Err: sarama.ErrNoError, + MemberAssignment: []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 10, 't', 'e', 's', 't', '_', 't', 'o', 'p', 'i', 'c', // Topic one + 0, 0, 0, 1, // Topic one, partition array length + 0, 0, 0, 0, // 0 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + }, + }), + "OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t). + SetOffset("test_group", "test_topic", 0, 10, "", sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset("test_topic", 0, sarama.OffsetNewest, 10). + SetOffset("test_topic", 0, sarama.OffsetOldest, 7), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage("test_topic", 0, 10, sarama.StringEncoder("foo")). 
+ SetHighWaterMark("test_topic", 0, 14), + "OffsetCommitRequest": sarama.NewMockOffsetCommitResponse(t), + "LeaveGroupRequest": sarama.NewMockWrapper(&sarama.LeaveGroupResponse{ + Err: sarama.ErrNoError, + }), + }) + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + s, _ := kafka.NewSource(c) + defer s.Close() + meta := kafka.Metadata{{Topic: "test_topic", Partition: 0, Offset: 10}} + + time.Sleep(100 * time.Millisecond) + + s.Consume() + + err := s.Commit(meta) + + assert.NoError(t, err) +} + +func TestSource_CommitNilMetadata(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t). + SetCoordinator(sarama.CoordinatorGroup, "test_group", broker0), + "JoinGroupRequest": sarama.NewMockWrapper(&sarama.JoinGroupResponse{ + Version: 1, + Err: sarama.ErrNoError, + GroupProtocol: "protocol", + }), + "SyncGroupRequest": sarama.NewMockWrapper(&sarama.SyncGroupResponse{ + Err: sarama.ErrNoError, + MemberAssignment: []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 10, 't', 'e', 's', 't', '_', 't', 'o', 'p', 'i', 'c', // Topic one + 0, 0, 0, 1, // Topic one, partition array length + 0, 0, 0, 0, // 0 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + }, + }), + "OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t). + SetOffset("test_group", "test_topic", 0, 10, "", sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset("test_topic", 0, sarama.OffsetNewest, 10). + SetOffset("test_topic", 0, sarama.OffsetOldest, 7), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage("test_topic", 0, 10, sarama.StringEncoder("foo")). 
+ SetHighWaterMark("test_topic", 0, 14), + "LeaveGroupRequest": sarama.NewMockWrapper(&sarama.LeaveGroupResponse{ + Err: sarama.ErrNoError, + }), + }) + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + s, _ := kafka.NewSource(c) + defer s.Close() + + time.Sleep(100 * time.Millisecond) + + s.Consume() + + err := s.Commit(nil) + + assert.NoError(t, err) +} + +func TestSource_CommitReturnError(t *testing.T) { + broker0 := sarama.NewMockBroker(t, 0) + defer broker0.Close() + broker0.SetHandlerByMap(map[string]sarama.MockResponse{ + "MetadataRequest": sarama.NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("test_topic", 0, broker0.BrokerID()), + "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t). + SetCoordinator(sarama.CoordinatorGroup, "test_group", broker0), + "JoinGroupRequest": sarama.NewMockWrapper(&sarama.JoinGroupResponse{ + Version: 1, + Err: sarama.ErrNoError, + GroupProtocol: "protocol", + }), + "SyncGroupRequest": sarama.NewMockWrapper(&sarama.SyncGroupResponse{ + Err: sarama.ErrNoError, + MemberAssignment: []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 10, 't', 'e', 's', 't', '_', 't', 'o', 'p', 'i', 'c', // Topic one + 0, 0, 0, 1, // Topic one, partition array length + 0, 0, 0, 0, // 0 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + }, + }), + "OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t). + SetOffset("test_group", "test_topic", 0, 10, "", sarama.ErrNoError), + "OffsetRequest": sarama.NewMockOffsetResponse(t). + SetOffset("test_topic", 0, sarama.OffsetNewest, 10). + SetOffset("test_topic", 0, sarama.OffsetOldest, 7), + "FetchRequest": sarama.NewMockFetchResponse(t, 1). + SetMessage("test_topic", 0, 10, sarama.StringEncoder("foo")). + SetHighWaterMark("test_topic", 0, 14), + "OffsetCommitRequest": sarama.NewMockOffsetCommitResponse(t). 
+ SetError("test_group", "test_topic", 0, sarama.ErrBrokerNotAvailable), + "LeaveGroupRequest": sarama.NewMockWrapper(&sarama.LeaveGroupResponse{ + Err: sarama.ErrNoError, + }), + }) + c := kafka.NewSourceConfig() + c.Brokers = []string{broker0.Addr()} + c.Topic = "test_topic" + c.GroupID = "test_group" + s, _ := kafka.NewSource(c) + defer s.Close() + meta := kafka.Metadata{{Topic: "test_topic", Partition: 0, Offset: 10}} + + time.Sleep(100 * time.Millisecond) + + s.Consume() + + err := s.Commit(meta) + + assert.Error(t, err) +} diff --git a/message.go b/message.go index 32dc5e8..89b58d6 100644 --- a/message.go +++ b/message.go @@ -4,23 +4,44 @@ import ( "context" ) +// MetadataOrigin represents the metadata origin type. +type MetadataOrigin uint8 + +// MetadataOrigin types +const ( + CommitterOrigin MetadataOrigin = iota + ProcessorOrigin +) + +// Metadata represents metadata that can be merged. +type Metadata interface { + // WithOrigin sets the MetadataOrigin on the metadata. + WithOrigin(MetadataOrigin) + // Update updates the given metadata with the contained metadata. + Update(Metadata) Metadata + // Merge merges the contained metadata into the given the metadata. + Merge(Metadata) Metadata +} + // Message represents data the flows through the stream. type Message struct { - metadata map[Source]interface{} + source Source + metadata Metadata Ctx context.Context Key interface{} Value interface{} } -// Metadata returns the Message metadata. -func (m *Message) Metadata() map[Source]interface{} { - return m.metadata +// Metadata returns the Message Metadata. +func (m *Message) Metadata() (Source, Metadata) { + return m.source, m.metadata } -// WithMetadata add metadata to the Message from a Source. -func (m *Message) WithMetadata(s Source, v interface{}) *Message { - m.metadata[s] = v +// WithMetadata add metadata to the Message for a Source. 
+func (m *Message) WithMetadata(s Source, v Metadata) *Message { + m.source = s + m.metadata = v return m } @@ -33,7 +54,8 @@ func (m Message) Empty() bool { // NewMessage creates a Message. func NewMessage(k, v interface{}) *Message { return &Message{ - metadata: map[Source]interface{}{}, + source: nil, + metadata: nil, Ctx: context.Background(), Key: k, Value: v, @@ -43,7 +65,8 @@ func NewMessage(k, v interface{}) *Message { // NewMessageWithContext creates a Message with the given context. func NewMessageWithContext(ctx context.Context, k, v interface{}) *Message { return &Message{ - metadata: map[Source]interface{}{}, + source: nil, + metadata: nil, Ctx: ctx, Key: k, Value: v, diff --git a/message_test.go b/message_test.go index e225e65..c4dd2ea 100644 --- a/message_test.go +++ b/message_test.go @@ -1,9 +1,10 @@ -package streams +package streams_test import ( "context" "testing" + "github.com/msales/streams/v2" "github.com/stretchr/testify/assert" ) @@ -20,14 +21,14 @@ func TestMessage_Empty(t *testing.T) { } for _, tt := range tests { - msg := NewMessage(tt.key, tt.value) + msg := streams.NewMessage(tt.key, tt.value) assert.Equal(t, tt.empty, msg.Empty()) } } func TestNewMessage(t *testing.T) { - msg := NewMessage("test", "test") + msg := streams.NewMessage("test", "test") assert.Equal(t, context.Background(), msg.Ctx) assert.Equal(t, "test", msg.Key) @@ -38,7 +39,7 @@ type ctxKey string func TestNewMessageWithContext(t *testing.T) { ctx := context.WithValue(context.Background(), ctxKey("1"), "2") - msg := NewMessageWithContext(ctx, "test", "test") + msg := streams.NewMessageWithContext(ctx, "test", "test") assert.Equal(t, ctx, msg.Ctx) assert.Equal(t, "test", msg.Key) @@ -46,10 +47,13 @@ func TestNewMessageWithContext(t *testing.T) { } func TestMessage_Metadata(t *testing.T) { - msg := NewMessage("test", "test") + s := new(MockSource) + m := new(MockMetadata) + msg := streams.NewMessage("test", "test") - msg.WithMetadata(nil, "test") + msg.WithMetadata(s, m) 
- assert.Len(t, msg.Metadata(), 1) - assert.Equal(t, "test", msg.Metadata()[nil]) + src, meta := msg.Metadata() + assert.Equal(t, s, src) + assert.Equal(t, m, meta) } diff --git a/metastore.go b/metastore.go new file mode 100644 index 0000000..757428e --- /dev/null +++ b/metastore.go @@ -0,0 +1,144 @@ +package streams + +import ( + "sync" + "sync/atomic" +) + +// Metastore represents a metadata store. +// +// Metastore is only partially concurrency safe. Mark and PullAll +// can be used concurrently, but Mark and Pull cannot. This is done +// to avoid locks and improve performance. +type Metastore interface { + // Pull gets and clears the processors metadata. + Pull(Processor) (Metaitems, error) + // PullAll gets and clears all metadata. + PullAll() (map[Processor]Metaitems, error) + // Mark sets metadata for a processor. + Mark(Processor, Source, Metadata) error +} + +// Metaitem represents the source metadata combination. +type Metaitem struct { + Source Source + Metadata Metadata +} + +// Metaitems represents a slice of Metaitem pointers. +type Metaitems []*Metaitem + +// Update combines contents of two Metaitems objects, updating the Metadata where necessary. +func (m Metaitems) Update(items Metaitems) Metaitems { + return m.join(items, func(old, new Metadata) Metadata { + return old.Update(new) + }) +} + +// Merge combines contents of two Metaitems objects, merging the Metadata where necessary. 
+func (m Metaitems) Merge(items Metaitems) Metaitems { + return m.join(items, func(old, new Metadata) Metadata { + return old.Merge(new) + }) +} + +func (m Metaitems) join(items Metaitems, fn func(old, new Metadata) Metadata) Metaitems { +OUTER: + for _, newItem := range items { + for _, oldItem := range m { + if oldItem.Source == newItem.Source { + if oldItem.Metadata != nil { + oldItem.Metadata = fn(oldItem.Metadata, newItem.Metadata) + } + continue OUTER + } + } + + m = append(m, newItem) + } + + return m +} + +type metastore struct { + metadata atomic.Value // map[Processor][]Metaitem + + procMu sync.Mutex +} + +// NewMetastore creates a new Metastore instance. +func NewMetastore() Metastore { + s := &metastore{} + s.metadata.Store(&map[Processor]Metaitems{}) + + return s +} + +// Pull gets and clears the processors metadata. +func (s *metastore) Pull(p Processor) (Metaitems, error) { + meta := s.metadata.Load().(*map[Processor]Metaitems) + + // We don't lock here as the Pump should be locked at this point + items, ok := (*meta)[p] + if ok { + delete(*meta, p) + return items, nil + } + + return nil, nil +} + +// PullAll gets and clears all metadata. +func (s *metastore) PullAll() (map[Processor]Metaitems, error) { + meta := s.metadata.Load().(*map[Processor]Metaitems) + + s.metadata.Store(&map[Processor]Metaitems{}) + + // Make sure no marks are happening on the old metadata + s.procMu.Lock() + s.procMu.Unlock() + + return *meta, nil +} + +// Mark sets metadata for a processor. 
+func (s *metastore) Mark(p Processor, src Source, meta Metadata) error { + if p == nil { + return nil + } + + procMeta := s.metadata.Load().(*map[Processor]Metaitems) + + if meta != nil { + o := ProcessorOrigin + if _, ok := p.(Committer); ok { + o = CommitterOrigin + } + + meta.WithOrigin(o) + } + + s.procMu.Lock() + items, ok := (*procMeta)[p] + if !ok { + (*procMeta)[p] = Metaitems{{Source: src, Metadata: meta}} + s.procMu.Unlock() + return nil + } + s.procMu.Unlock() + + if src == nil || meta == nil { + return nil + } + + for _, item := range items { + if item.Source == src { + item.Metadata = meta.Update(item.Metadata) + return nil + } + } + + items = append(items, &Metaitem{Source: src, Metadata: meta}) + (*procMeta)[p] = items + return nil +} diff --git a/metastore_test.go b/metastore_test.go new file mode 100644 index 0000000..0e2df9e --- /dev/null +++ b/metastore_test.go @@ -0,0 +1,316 @@ +package streams_test + +import ( + "testing" + + "github.com/msales/streams/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestMetaitems_Update(t *testing.T) { + src1 := new(MockSource) + src2 := new(MockSource) + src3 := new(MockSource) + + meta1 := new(MockMetadata) + meta2 := new(MockMetadata) + meta3 := new(MockMetadata) + meta4 := new(MockMetadata) + meta5 := new(MockMetadata) // == meta2.Update(meta4) + + item1 := &streams.Metaitem{Source: src1, Metadata: meta1} + item2 := &streams.Metaitem{Source: src2, Metadata: meta2} + item3 := &streams.Metaitem{Source: src3, Metadata: meta3} + item4 := &streams.Metaitem{Source: src2, Metadata: meta4} // src2! 
+ + items := streams.Metaitems{item1, item2} + other := streams.Metaitems{item3, item4} + + meta2.On("Update", mock.Anything).Return(meta5) + + joined := items.Update(other) + + assert.Len(t, joined, 3) + assert.True(t, joined[0] == item1) + assert.True(t, joined[1] == item2) + assert.True(t, joined[2] == item3) + assert.True(t, meta5 == item2.Metadata) + meta2.AssertCalled(t, "Update", meta4) +} + +func TestMetaitems_UpdateHandlesNilSourceAndMetadata(t *testing.T) { + items := streams.Metaitems{{Source: nil, Metadata: nil}} + other := streams.Metaitems{{Source: nil, Metadata: nil}} + + joined := items.Update(other) + + assert.Len(t, joined, 1) +} + +func TestMetaitems_Merge(t *testing.T) { + src1 := new(MockSource) + src2 := new(MockSource) + src3 := new(MockSource) + + meta1 := new(MockMetadata) + meta2 := new(MockMetadata) + meta3 := new(MockMetadata) + meta4 := new(MockMetadata) + meta5 := new(MockMetadata) // == meta2.Update(meta4) + + item1 := &streams.Metaitem{Source: src1, Metadata: meta1} + item2 := &streams.Metaitem{Source: src2, Metadata: meta2} + item3 := &streams.Metaitem{Source: src3, Metadata: meta3} + item4 := &streams.Metaitem{Source: src2, Metadata: meta4} // src2! 
+ + items := streams.Metaitems{item1, item2} + other := streams.Metaitems{item3, item4} + + meta2.On("Merge", mock.Anything).Return(meta5) + + merged := items.Merge(other) + + assert.Len(t, merged, 3) + assert.True(t, merged[0] == item1) + assert.True(t, merged[1] == item2) + assert.True(t, merged[2] == item3) + assert.True(t, meta5 == item2.Metadata) + meta2.AssertCalled(t, "Merge", meta4) +} + +func TestMetaitems_MergeHandlesNilSourceAndMetadata(t *testing.T) { + items := streams.Metaitems{{Source: nil, Metadata: nil}} + other := streams.Metaitems{{Source: nil, Metadata: nil}} + + joined := items.Merge(other) + + assert.Len(t, joined, 1) +} + +func BenchmarkMetaitems_Update(b *testing.B) { + src1 := &fakeSource{} + src2 := &fakeSource{} + src3 := &fakeSource{} + + meta1 := &fakeMetadata{} + meta2 := &fakeMetadata{} + meta3 := &fakeMetadata{} + meta4 := &fakeMetadata{} + + items1 := streams.Metaitems{{Source: src1, Metadata: meta1}, {Source: src2, Metadata: meta2}} + items2 := streams.Metaitems{{Source: src3, Metadata: meta3}, {Source: src2, Metadata: meta4}} + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + items1.Update(items2) + } +} + +func BenchmarkMetaitems_Merge(b *testing.B) { + src1 := &fakeSource{} + src2 := &fakeSource{} + src3 := &fakeSource{} + + meta1 := &fakeMetadata{} + meta2 := &fakeMetadata{} + meta3 := &fakeMetadata{} + meta4 := &fakeMetadata{} + + items1 := streams.Metaitems{{Source: src1, Metadata: meta1}, {Source: src2, Metadata: meta2}} + items2 := streams.Metaitems{{Source: src3, Metadata: meta3}, {Source: src2, Metadata: meta4}} + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + items1.Merge(items2) + } +} + +func TestMetastore_PullAll(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + meta := new(MockMetadata) + meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + _ = s.Mark(p, src, meta) + + procMeta, err := s.PullAll() + + assert.NoError(t, err) + 
assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: meta}}}, procMeta) +} + +func TestMetastore_PullAllClearsMetastore(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + meta := new(MockMetadata) + meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + _ = s.Mark(p, src, meta) + + _, _ = s.PullAll() + procMeta, err := s.PullAll() + + assert.NoError(t, err) + assert.Equal(t, map[streams.Processor]streams.Metaitems{}, procMeta) +} + +func TestMetastore_MarkProcessor(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + meta1 := new(MockMetadata) + meta1.On("WithOrigin", streams.ProcessorOrigin) + newMeta := new(MockMetadata) + meta2 := new(MockMetadata) + meta2.On("WithOrigin", streams.ProcessorOrigin) + meta2.On("Update", meta1).Return(newMeta) + s := streams.NewMetastore() + + err := s.Mark(p, src, meta1) + assert.NoError(t, err) + + err = s.Mark(p, src, meta2) + assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: newMeta}}}, procMeta) +} + +func TestMetastore_MarkCommitter(t *testing.T) { + p := new(MockCommitter) + src := new(MockSource) + meta1 := new(MockMetadata) + meta1.On("WithOrigin", streams.CommitterOrigin) + newMeta := new(MockMetadata) + meta2 := new(MockMetadata) + meta2.On("WithOrigin", streams.CommitterOrigin) + meta2.On("Update", meta1).Return(newMeta) + s := streams.NewMetastore() + + err := s.Mark(p, src, meta1) + assert.NoError(t, err) + + err = s.Mark(p, src, meta2) + assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: newMeta}}}, procMeta) +} + +func TestMetastore_MarkNilProcessor(t *testing.T) { + src := new(MockSource) + meta := new(MockMetadata) + s := streams.NewMetastore() + + err := s.Mark(nil, src, meta) + assert.NoError(t, err) + + err = s.Mark(nil, src, meta) + 
assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{}, procMeta) +} + +func TestMetastore_MarkNilSource(t *testing.T) { + p := new(MockProcessor) + meta := new(MockMetadata) + meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + + err := s.Mark(p, nil, meta) + assert.NoError(t, err) + + err = s.Mark(p, nil, meta) + assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: nil, Metadata: meta}}}, procMeta) +} + +func TestMetastore_MarkNilMetadata(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + s := streams.NewMetastore() + + err := s.Mark(p, src, nil) + assert.NoError(t, err) + + err = s.Mark(p, src, nil) + assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: nil}}}, procMeta) +} + +func TestMetastore_MarkMultipleSources(t *testing.T) { + p := new(MockProcessor) + src1 := new(MockSource) + src2 := new(MockSource) + meta := new(MockMetadata) + meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + + err := s.Mark(p, src1, meta) + assert.NoError(t, err) + + err = s.Mark(p, src2, meta) + assert.NoError(t, err) + + procMeta, _ := s.PullAll() + assert.Equal(t, map[streams.Processor]streams.Metaitems{p: {{Source: src1, Metadata: meta}, {Source: src2, Metadata: meta}}}, procMeta) +} + +func TestMetastore_Pull(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + meta := new(MockMetadata) + meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + _ = s.Mark(p, src, meta) + + pulled, err := s.Pull(p) + + assert.NoError(t, err) + assert.Equal(t, streams.Metaitems{{Source: src, Metadata: meta}}, pulled) +} + +func TestMetastore_PullClearsProcessor(t *testing.T) { + p := new(MockProcessor) + src := new(MockSource) + meta := new(MockMetadata) + 
meta.On("WithOrigin", streams.ProcessorOrigin) + s := streams.NewMetastore() + _ = s.Mark(p, src, meta) + + _, _ = s.Pull(p) + pulled, err := s.Pull(p) + + assert.NoError(t, err) + assert.Equal(t, streams.Metaitems(nil), pulled) +} + +func TestMetastore_PullReturnsNilIfDoesntExist(t *testing.T) { + p := new(MockProcessor) + s := streams.NewMetastore() + + pulled, err := s.Pull(p) + + assert.NoError(t, err) + assert.Equal(t, streams.Metaitems(nil), pulled) +} + +func BenchmarkMetastore_Mark(b *testing.B) { + p := new(MockProcessor) + src := new(MockSource) + meta := &fakeMetadata{} + s := streams.NewMetastore() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = s.Mark(p, src, meta) + } +} diff --git a/mocks/pipe.go b/mocks/pipe.go index 4b33fbf..1bb7c2f 100644 --- a/mocks/pipe.go +++ b/mocks/pipe.go @@ -4,7 +4,7 @@ import ( "errors" "testing" - "github.com/msales/streams" + "github.com/msales/streams/v2" ) type record struct { @@ -29,6 +29,7 @@ type Pipe struct { shouldError bool + expectMark []record expectForward []record expectCommit bool } @@ -38,6 +39,7 @@ func NewPipe(t *testing.T) *Pipe { return &Pipe{ t: t, msgs: []ChildMessage{}, + expectMark: []record{}, expectForward: []record{}, } } @@ -47,6 +49,28 @@ func (p *Pipe) Messages() []ChildMessage { return p.msgs } +// Mark indicates that the message has been dealt with +func (p *Pipe) Mark(msg *streams.Message) error { + if len(p.expectMark) == 0 { + p.t.Error("streams: mock: Unexpected call to Mark") + return nil + } + record := p.expectMark[0] + p.expectMark = p.expectMark[1:] + + if (record.key != Anything && msg.Key != record.key) || + (record.value != Anything && msg.Value != record.value) { + p.t.Errorf("streams: mock: Arguments to Mark did not match expectation: wanted %v:%v, got %v:%v", record.key, record.value, msg.Key, msg.Value) + } + + if p.shouldError { + p.shouldError = false + return errors.New("test") + } + + return nil +} + // Forward queues the data to all processor 
children in the topology. func (p *Pipe) Forward(msg *streams.Message) error { if len(p.expectForward) == 0 { @@ -117,23 +141,32 @@ func (p *Pipe) ShouldError() { p.shouldError = true } -// ExpectForward registers an expectation of a Forward of the Pipe. +// ExpectMark registers an expectation of a Mark on the Pipe. +func (p *Pipe) ExpectMark(k, v interface{}) { + p.expectMark = append(p.expectMark, record{k, v, -1}) +} + +// ExpectForward registers an expectation of a Forward on the Pipe. func (p *Pipe) ExpectForward(k, v interface{}) { p.expectForward = append(p.expectForward, record{k, v, -1}) } -// ExpectForwardToChild registers an expectation of a ForwardToChild the Pipe. +// ExpectForwardToChild registers an expectation of a ForwardToChild on the Pipe. func (p *Pipe) ExpectForwardToChild(k, v interface{}, index int) { p.expectForward = append(p.expectForward, record{k, v, index}) } -// ExpectCommit registers an expectation of a Commit the Pipe. +// ExpectCommit registers an expectation of a Commit on the Pipe. func (p *Pipe) ExpectCommit() { p.expectCommit = true } // AssertExpectations asserts that the expectations were met. 
func (p *Pipe) AssertExpectations() { + if len(p.expectMark) > 0 { + p.t.Error("streams: mock: Expected a call to Mark but got none") + } + if len(p.expectForward) > 0 { p.t.Error("streams: mock: Expected a call to Forward or ForwardToChild but got none") } diff --git a/mocks/pipe_test.go b/mocks/pipe_test.go index 7f6e625..f9ce3d1 100644 --- a/mocks/pipe_test.go +++ b/mocks/pipe_test.go @@ -3,8 +3,8 @@ package mocks_test import ( "testing" - "github.com/msales/streams" - "github.com/msales/streams/mocks" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/mocks" "github.com/stretchr/testify/assert" ) @@ -45,10 +45,12 @@ func TestPipe_MessagesForForwardToChild(t *testing.T) { func TestPipe_HandlesExpectations(t *testing.T) { p := mocks.NewPipe(t) + p.ExpectMark("test", "test") p.ExpectForward("test", "test") p.ExpectForwardToChild("test", "test", 1) p.ExpectCommit() + p.Mark(streams.NewMessage("test", "test")) p.Forward(streams.NewMessage("test", "test")) p.ForwardToChild(streams.NewMessage("test", "test"), 1) p.Commit(streams.NewMessage("test", "test")) @@ -58,14 +60,56 @@ func TestPipe_HandlesExpectations(t *testing.T) { func TestPipe_HandlesAnythingExpectations(t *testing.T) { p := mocks.NewPipe(t) + p.ExpectMark(mocks.Anything, mocks.Anything) p.ExpectForward(mocks.Anything, mocks.Anything) p.ExpectForwardToChild(mocks.Anything, mocks.Anything, 1) + p.Mark(streams.NewMessage("test", "test")) p.Forward(streams.NewMessage("test", "test")) p.ForwardToChild(streams.NewMessage("test", "test"), 1) p.AssertExpectations() } +func TestPipe_WithoutExpectationOnMark(t *testing.T) { + mockT := new(testing.T) + defer func() { + if !mockT.Failed() { + t.Error("Expected error when no expectation on Mark") + } + + }() + p := mocks.NewPipe(mockT) + + p.Mark(streams.NewMessage("test", "test")) +} + +func TestPipe_WithWrongExpectationOnMark(t *testing.T) { + mockT := new(testing.T) + defer func() { + if !mockT.Failed() { + t.Error("Expected error when wrong 
expectation on Mark") + } + + }() + p := mocks.NewPipe(mockT) + p.ExpectMark(1, 1) + + p.Mark(streams.NewMessage("test", "test")) +} + +func TestPipe_WithShouldErrorOnMark(t *testing.T) { + mockT := new(testing.T) + p := mocks.NewPipe(mockT) + p.ExpectMark("test", "test") + p.ShouldError() + + err := p.Mark(streams.NewMessage("test", "test")) + + if err == nil { + t.Error("Expected error but got none") + } +} + func TestPipe_WithoutExpectationOnForward(t *testing.T) { mockT := new(testing.T) defer func() { @@ -172,11 +216,25 @@ func TestPipe_WithErrorOnCommit(t *testing.T) { } } +func TestPipe_WithUnfulfilledExpectationOnMark(t *testing.T) { + mockT := new(testing.T) + defer func() { + if !mockT.Failed() { + t.Error("Expected error when unfulfilled expectation on Mark") + } + + }() + p := mocks.NewPipe(mockT) + p.ExpectMark(1, 1) + + p.AssertExpectations() +} + func TestPipe_WithUnfulfilledExpectationOnForward(t *testing.T) { mockT := new(testing.T) defer func() { if !mockT.Failed() { - t.Error("Expected error when unforfilled expectation on Forward") + t.Error("Expected error when unfulfilled expectation on Forward") } }() @@ -190,7 +248,7 @@ func TestPipe_WithUnfulfilledExpectationOnForwardToChild(t *testing.T) { mockT := new(testing.T) defer func() { if !mockT.Failed() { - t.Error("Expected error when unforfilled expectation on ForwardToChild") + t.Error("Expected error when unfulfilled expectation on ForwardToChild") } }() @@ -204,7 +262,7 @@ func TestPipe_WithUnfulfilledExpectationOnCommit(t *testing.T) { mockT := new(testing.T) defer func() { if !mockT.Failed() { - t.Error("Expected error when unforfilled expectation on Commit") + t.Error("Expected error when unfulfilled expectation on Commit") } }() diff --git a/mocks_test.go b/mocks_test.go index f451b7d..6b08053 100644 --- a/mocks_test.go +++ b/mocks_test.go @@ -3,10 +3,30 @@ package streams_test import ( "time" - "github.com/msales/streams" + "github.com/msales/streams/v2" 
"github.com/stretchr/testify/mock" ) +var _ = (streams.Metadata)(&MockMetadata{}) + +type MockMetadata struct { + mock.Mock +} + +func (m *MockMetadata) WithOrigin(o streams.MetadataOrigin) { + m.Called(o) +} + +func (m *MockMetadata) Update(v streams.Metadata) streams.Metadata { + args := m.Called(v) + return args.Get(0).(streams.Metadata) +} + +func (m *MockMetadata) Merge(v streams.Metadata) streams.Metadata { + args := m.Called(v) + return args.Get(0).(streams.Metadata) +} + var _ = (streams.Node)(&MockNode{}) type MockNode struct { @@ -27,11 +47,65 @@ func (mn *MockNode) Children() []streams.Node { return args.Get(0).([]streams.Node) } -func (mn *MockNode) Processor() (streams.Processor) { +func (mn *MockNode) Processor() streams.Processor { args := mn.Called() return args.Get(0).(streams.Processor) } +var _ = (streams.Metastore)(&MockMetastore{}) + +type MockMetastore struct { + mock.Mock +} + +func (s *MockMetastore) Pull(p streams.Processor) (streams.Metaitems, error) { + args := s.Called(p) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(streams.Metaitems), args.Error(1) +} + +func (s *MockMetastore) PullAll() (map[streams.Processor]streams.Metaitems, error) { + args := s.Called() + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(map[streams.Processor]streams.Metaitems), args.Error(1) +} + +func (s *MockMetastore) Mark(p streams.Processor, src streams.Source, meta streams.Metadata) error { + args := s.Called(p, src, meta) + return args.Error(0) +} + +type MockSupervisor struct { + mock.Mock +} + +func (s *MockSupervisor) Start() error { + args := s.Called() + return args.Error(0) +} + +func (s *MockSupervisor) Close() error { + args := s.Called() + return args.Error(0) +} + +func (s *MockSupervisor) WithPumps(pumps map[streams.Node]streams.Pump) { + s.Called(pumps) +} + +func (s *MockSupervisor) Commit(p streams.Processor) error { + args := s.Called(p) + return args.Error(0) +} + var _ = 
(streams.TimedPipe)(&MockTimedPipe{}) type MockTimedPipe struct { @@ -53,16 +127,33 @@ type MockPump struct { mock.Mock } -func (p *MockPump) Process(msg *streams.Message) error { +func (p *MockPump) Lock() { + p.Called() +} + +func (p *MockPump) Unlock() { + p.Called() +} + +func (p *MockPump) Accept(msg *streams.Message) error { args := p.Called(msg) return args.Error(0) } +func (p *MockPump) Stop() { + p.Called() +} + func (p *MockPump) Close() error { args := p.Called() return args.Error(0) } +func (p *MockPump) WithLock(func() error) error { + args := p.Called() + return args.Error(0) +} + var _ = (streams.Processor)(&MockProcessor{}) type MockProcessor struct { @@ -83,6 +174,32 @@ func (p *MockProcessor) Close() error { return args.Error(0) } +var _ = (streams.Processor)(&MockCommitter{}) +var _ = (streams.Committer)(&MockCommitter{}) + +type MockCommitter struct { + mock.Mock +} + +func (p *MockCommitter) WithPipe(pipe streams.Pipe) { + p.Called(pipe) +} + +func (p *MockCommitter) Process(msg *streams.Message) error { + args := p.Called(msg) + return args.Error(0) +} + +func (p *MockCommitter) Commit() error { + args := p.Called() + return args.Error(0) +} + +func (p *MockCommitter) Close() error { + args := p.Called() + return args.Error(0) +} + var _ = (streams.Source)(&MockSource{}) type MockSource struct { diff --git a/nanotime.go b/nanotime.go new file mode 100644 index 0000000..4575df3 --- /dev/null +++ b/nanotime.go @@ -0,0 +1,8 @@ +package streams + +import ( + _ "unsafe" +) + +//go:linkname nanotime runtime.nanotime +func nanotime() int64 diff --git a/nanotime.s b/nanotime.s new file mode 100644 index 0000000..e69de29 diff --git a/pipe.go b/pipe.go index 9b46393..dcc5168 100644 --- a/pipe.go +++ b/pipe.go @@ -16,11 +16,13 @@ type TimedPipe interface { // Pipe allows messages to flow through the processors. type Pipe interface { - // Forward queues the data to all processor children in the topology. 
+ // Mark indicates that the message has been dealt with + Mark(*Message) error + // Forward queues the message with all processor children in the topology. Forward(*Message) error - // Forward queues the data to the the given processor(s) child in the topology. + // Forward queues the message with the given processor(inner) child in the topology. ForwardToChild(*Message, int) error - // Commit commits the current state in the sources. + // Commit commits the current state in the related sources. Commit(*Message) error } @@ -28,15 +30,21 @@ var _ = (TimedPipe)(&processorPipe{}) // processorPipe represents the pipe for processors. type processorPipe struct { - children []Pump + store Metastore + supervisor Supervisor + proc Processor + children []Pump duration time.Duration } // NewPipe create a new processorPipe instance. -func NewPipe(children []Pump) Pipe { +func NewPipe(store Metastore, supervisor Supervisor, proc Processor, children []Pump) Pipe { return &processorPipe{ - children: children, + store: store, + supervisor: supervisor, + proc: proc, + children: children, } } @@ -50,45 +58,64 @@ func (p *processorPipe) Duration() time.Duration { return p.duration } +// Mark indicates that the message has been dealt with +func (p *processorPipe) Mark(msg *Message) error { + start := nanotime() + + err := p.store.Mark(p.proc, msg.source, msg.metadata) + + p.time(start) + + return err +} + // Forward queues the data to all processor children in the topology. func (p *processorPipe) Forward(msg *Message) error { - defer p.time(time.Now()) + start := nanotime() for _, child := range p.children { - if err := child.Process(msg); err != nil { + if err := child.Accept(msg); err != nil { return err } } + p.time(start) + + return nil } -// Forward queues the data to the the given processor(s) child in the topology. +// Forward queues the data to the given processor(inner) child in the topology. 
func (p *processorPipe) ForwardToChild(msg *Message, index int) error { - defer p.time(time.Now()) + start := nanotime() if index > len(p.children)-1 { return errors.New("streams: child index out of bounds") } child := p.children[index] - return child.Process(msg) + err := child.Accept(msg) + + p.time(start) + + return err } // Commit commits the current state in the sources. func (p *processorPipe) Commit(msg *Message) error { - defer p.time(time.Now()) + start := nanotime() - for s, v := range msg.Metadata() { - if err := s.Commit(v); err != nil { - return err - } + if err := p.store.Mark(p.proc, msg.source, msg.metadata); err != nil { + return err } - return nil + err := p.supervisor.Commit(p.proc) + + p.time(start) + + return err } // time adds the duration of the function to the pipe accumulative duration. -func (p *processorPipe) time(t time.Time) { - p.duration += time.Since(t) +func (p *processorPipe) time(t int64) { + p.duration += time.Duration(nanotime() - t) //time.Since(t) } diff --git a/pipe_test.go b/pipe_test.go index e94f81f..a2f5c82 100644 --- a/pipe_test.go +++ b/pipe_test.go @@ -3,18 +3,20 @@ package streams_test import ( "testing" - "github.com/msales/streams" + "github.com/msales/streams/v2" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) func TestProcessorPipe_Duration(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) - child1.On("Process", msg).Return(nil) + child1.On("Accept", msg).Return(nil) child2 := new(MockPump) - child2.On("Process", msg).Return(nil) - pipe := streams.NewPipe([]streams.Pump{child1, child2}) + child2.On("Accept", msg).Return(nil) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{child1, child2}) tPipe := pipe.(streams.TimedPipe) err := pipe.Forward(msg) @@ -28,12 +30,14 @@ func TestProcessorPipe_Duration(t *testing.T) { } func TestProcessorPipe_Reset(t *testing.T) { + store := 
new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) - child1.On("Process", msg).Return(nil) + child1.On("Accept", msg).Return(nil) child2 := new(MockPump) - child2.On("Process", msg).Return(nil) - pipe := streams.NewPipe([]streams.Pump{child1, child2}) + child2.On("Accept", msg).Return(nil) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{child1, child2}) tPipe := pipe.(streams.TimedPipe) err := pipe.Forward(msg) @@ -47,13 +51,31 @@ func TestProcessorPipe_Reset(t *testing.T) { } } +func TestProcessorPipe_Mark(t *testing.T) { + proc := new(MockProcessor) + src := new(MockSource) + store := new(MockMetastore) + supervisor := new(MockSupervisor) + meta := new(MockMetadata) + store.On("Mark", proc, src, meta).Return(errors.New("test")) + msg := streams.NewMessage("test", "test").WithMetadata(src, meta) + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) + + err := pipe.Mark(msg) + + assert.Error(t, err) + store.AssertExpectations(t) +} + func TestProcessorPipe_Forward(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) - child1.On("Process", msg).Return(nil) + child1.On("Accept", msg).Return(nil) child2 := new(MockPump) - child2.On("Process", msg).Return(nil) - pipe := streams.NewPipe([]streams.Pump{child1, child2}) + child2.On("Accept", msg).Return(nil) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{child1, child2}) err := pipe.Forward(msg) @@ -63,10 +85,12 @@ func TestProcessorPipe_Forward(t *testing.T) { } func TestProcessorPipe_ForwardError(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) - child1.On("Process", msg).Return(errors.New("test")) - pipe := streams.NewPipe([]streams.Pump{child1}) + child1.On("Accept", msg).Return(errors.New("test")) + pipe := 
streams.NewPipe(store, supervisor, nil, []streams.Pump{child1}) err := pipe.Forward(msg) @@ -75,11 +99,13 @@ func TestProcessorPipe_ForwardError(t *testing.T) { } func TestProcessorPipe_ForwardToChild(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) child2 := new(MockPump) - child2.On("Process", msg).Return(nil) - pipe := streams.NewPipe([]streams.Pump{child1, child2}) + child2.On("Accept", msg).Return(nil) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{child1, child2}) err := pipe.ForwardToChild(msg, 1) @@ -88,10 +114,12 @@ func TestProcessorPipe_ForwardToChild(t *testing.T) { } func TestProcessorPipe_ForwardToChildIndexError(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") child1 := new(MockPump) - child1.On("Process", msg).Return(errors.New("test")) - pipe := streams.NewPipe([]streams.Pump{child1}) + child1.On("Accept", msg).Return(errors.New("test")) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{child1}) err := pipe.ForwardToChild(msg, 1) @@ -99,8 +127,10 @@ func TestProcessorPipe_ForwardToChildIndexError(t *testing.T) { } func TestProcessorPipe_ForwardToChildError(t *testing.T) { + store := new(MockMetastore) + supervisor := new(MockSupervisor) msg := streams.NewMessage("test", "test") - pipe := streams.NewPipe([]streams.Pump{}) + pipe := streams.NewPipe(store, supervisor, nil, []streams.Pump{}) err := pipe.ForwardToChild(msg, 1) @@ -109,24 +139,78 @@ func TestProcessorPipe_ForwardToChildError(t *testing.T) { func TestProcessorPipe_Commit(t *testing.T) { src := new(MockSource) - src.On("Commit", interface{}("test")).Return(nil) - msg := streams.NewMessage(nil, nil).WithMetadata(src, "test") - pipe := streams.NewPipe([]streams.Pump{}) + store := new(MockMetastore) + supervisor := new(MockSupervisor) + meta := new(MockMetadata) + proc := 
new(MockProcessor) + store.On("Mark", proc, src, meta).Return(nil) + supervisor.On("Commit", proc).Return(nil) + msg := streams.NewMessage(nil, nil).WithMetadata(src, meta) + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) err := pipe.Commit(msg) assert.NoError(t, err) - src.AssertExpectations(t) + store.AssertExpectations(t) } -func TestProcessorPipe_CommitWithError(t *testing.T) { +func TestProcessorPipe_CommitMarkError(t *testing.T) { src := new(MockSource) - src.On("Commit", interface{}("test")).Return(errors.New("test")) - msg := streams.NewMessage(nil, nil).WithMetadata(src, "test") - pipe := streams.NewPipe([]streams.Pump{}) + store := new(MockMetastore) + supervisor := new(MockSupervisor) + meta := new(MockMetadata) + proc := new(MockProcessor) + store.On("Mark", proc, src, meta).Return(errors.New("test")) + msg := streams.NewMessage(nil, nil).WithMetadata(src, meta) + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) err := pipe.Commit(msg) assert.Error(t, err) - src.AssertExpectations(t) + store.AssertExpectations(t) +} + +func TestProcessorPipe_CommitSupervisorError(t *testing.T) { + src := new(MockSource) + store := new(MockMetastore) + supervisor := new(MockSupervisor) + meta := new(MockMetadata) + proc := new(MockProcessor) + store.On("Mark", proc, src, meta).Return(nil) + supervisor.On("Commit", proc).Return(errors.New("test")) + msg := streams.NewMessage(nil, nil).WithMetadata(src, meta) + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) + + err := pipe.Commit(msg) + + assert.Error(t, err) + store.AssertExpectations(t) +} + +func BenchmarkProcessorPipe_Mark(b *testing.B) { + store := &fakeMetastore{} + supervisor := &fakeSupervisor{} + proc := &fakeCommitter{} + msg := streams.NewMessage(nil, "test") + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pipe.Mark(msg) + } +} + +func 
BenchmarkProcessorPipe_Commit(b *testing.B) { + store := &fakeMetastore{} + supervisor := &fakeSupervisor{} + proc := &fakeCommitter{} + msg := streams.NewMessage(nil, "test") + pipe := streams.NewPipe(store, supervisor, proc, []streams.Pump{}) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pipe.Commit(msg) + } } diff --git a/processor.go b/processor.go index dfec197..9052aa0 100644 --- a/processor.go +++ b/processor.go @@ -4,6 +4,14 @@ import ( "fmt" ) +// Committer represents a processor that can commit. +type Committer interface { + Processor + + //Commit commits a processors batch. + Commit() error +} + // Processor represents a stream processor. type Processor interface { // WithPipe sets the pipe on the Processor. @@ -14,26 +22,65 @@ type Processor interface { Close() error } -// Mapper represents a mapping function. -type Mapper func(*Message) (*Message, error) +// Mapper represents a message transformer. +type Mapper interface { + // Map transforms a message into a new value. + Map(*Message) (*Message, error) +} + +// FlatMapper represents a transformer that returns zero or many messages. +type FlatMapper interface { + // FlatMap transforms a message into multiple messages. + FlatMap(*Message) ([]*Message, error) +} + +// Predicate represents a predicate (boolean-valued function) of a message. +type Predicate interface { + // Assert tests if the given message satisfies the predicate. + Assert(*Message) (bool, error) +} -// FlatMapper represents a mapping function that return multiple messages. -type FlatMapper func(*Message) ([]*Message, error) +var _ = (Mapper)(MapperFunc(nil)) + +// MapperFunc represents a function implementing the Mapper interface. +type MapperFunc func(*Message) (*Message, error) + +// Map transforms a message into a new value. 
+func (fn MapperFunc) Map(msg *Message) (*Message, error) { + return fn(msg) +} + +var _ = (FlatMapper)(FlatMapperFunc(nil)) + +// FlatMapperFunc represents a function implementing the FlatMapper interface. +type FlatMapperFunc func(*Message) ([]*Message, error) + +// FlatMap transforms a message into multiple messages. +func (fn FlatMapperFunc) FlatMap(msg *Message) ([]*Message, error) { + return fn(msg) +} -// Predicate represents a stream filter function. -type Predicate func(*Message) (bool, error) +var _ = (Predicate)(PredicateFunc(nil)) + +// PredicateFunc represents a function implementing the Predicate interface. +type PredicateFunc func(*Message) (bool, error) + +// Assert tests if the given message satisfies the predicate. +func (fn PredicateFunc) Assert(msg *Message) (bool, error) { + return fn(msg) +} // BranchProcessor is a processor that branches into one or more streams // based on the results of the predicates. type BranchProcessor struct { - pipe Pipe - fns []Predicate + pipe Pipe + preds []Predicate } // NewBranchProcessor creates a new BranchProcessor instance. -func NewBranchProcessor(fns []Predicate) Processor { +func NewBranchProcessor(preds []Predicate) Processor { return &BranchProcessor{ - fns: fns, + preds: preds, } } @@ -44,8 +91,8 @@ func (p *BranchProcessor) WithPipe(pipe Pipe) { // Process processes the stream nodeMessage. func (p *BranchProcessor) Process(msg *Message) error { - for i, fn := range p.fns { - ok, err := fn(msg) + for i, pred := range p.preds { + ok, err := pred.Assert(msg) if err != nil { return err } @@ -70,13 +117,13 @@ func (p *BranchProcessor) Close() error { // FilterProcessor is a processor that filters a stream using a predicate function. type FilterProcessor struct { pipe Pipe - fn Predicate + pred Predicate } // NewFilterProcessor creates a new FilterProcessor instance. 
-func NewFilterProcessor(fn Predicate) Processor { +func NewFilterProcessor(pred Predicate) Processor { return &FilterProcessor{ - fn: fn, + pred: pred, } } @@ -87,7 +134,7 @@ func (p *FilterProcessor) WithPipe(pipe Pipe) { // Process processes the stream Message. func (p *FilterProcessor) Process(msg *Message) error { - ok, err := p.fn(msg) + ok, err := p.pred.Assert(msg) if err != nil { return err } @@ -95,7 +142,8 @@ func (p *FilterProcessor) Process(msg *Message) error { if ok { return p.pipe.Forward(msg) } - return nil + + return p.pipe.Mark(msg) } // Close closes the processor. @@ -105,14 +153,14 @@ func (p *FilterProcessor) Close() error { // FlatMapProcessor is a processor that maps a stream using a flat mapping function. type FlatMapProcessor struct { - pipe Pipe - fn FlatMapper + pipe Pipe + mapper FlatMapper } // NewFlatMapProcessor creates a new FlatMapProcessor instance. -func NewFlatMapProcessor(fn FlatMapper) Processor { +func NewFlatMapProcessor(mapper FlatMapper) Processor { return &FlatMapProcessor{ - fn: fn, + mapper: mapper, } } @@ -123,7 +171,7 @@ func (p *FlatMapProcessor) WithPipe(pipe Pipe) { // Process processes the stream Message. func (p *FlatMapProcessor) Process(msg *Message) error { - msgs, err := p.fn(msg) + msgs, err := p.mapper.FlatMap(msg) if err != nil { return err } @@ -144,14 +192,14 @@ func (p *FlatMapProcessor) Close() error { // MapProcessor is a processor that maps a stream using a mapping function. type MapProcessor struct { - pipe Pipe - fn Mapper + pipe Pipe + mapper Mapper } // NewMapProcessor creates a new MapProcessor instance. -func NewMapProcessor(fn Mapper) Processor { +func NewMapProcessor(mapper Mapper) Processor { return &MapProcessor{ - fn: fn, + mapper: mapper, } } @@ -162,7 +210,7 @@ func (p *MapProcessor) WithPipe(pipe Pipe) { // Process processes the stream Message. 
func (p *MapProcessor) Process(msg *Message) error { - msg, err := p.fn(msg) + msg, err := p.mapper.Map(msg) if err != nil { return err } @@ -175,19 +223,14 @@ func (p *MapProcessor) Close() error { return nil } -// MergeProcessor is a processor that passes the message on, -// keeping track of seen metadata. +// MergeProcessor is a processor that merges multiple streams. type MergeProcessor struct { - metadata map[Source]interface{} - pipe Pipe } // NewMergeProcessor creates a new MergeProcessor instance. func NewMergeProcessor() Processor { - return &MergeProcessor{ - metadata: map[Source]interface{}{}, - } + return &MergeProcessor{} } // WithPipe sets the pipe on the Processor. @@ -197,16 +240,6 @@ func (p *MergeProcessor) WithPipe(pipe Pipe) { // Process processes the stream Message. func (p *MergeProcessor) Process(msg *Message) error { - // Update the internal metadata state - for s, v := range msg.Metadata() { - p.metadata[s] = v - } - - // Attach metadata to the message - for s, v := range p.metadata { - msg.WithMetadata(s, v) - } - return p.pipe.Forward(msg) } diff --git a/processor_test.go b/processor_test.go index cbc6932..1f9a514 100644 --- a/processor_test.go +++ b/processor_test.go @@ -3,19 +3,19 @@ package streams_test import ( "testing" - "github.com/msales/streams" - "github.com/msales/streams/mocks" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/mocks" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) func TestBranchProcessor_Process(t *testing.T) { - truePred := func(msg *streams.Message) (bool, error) { + truePred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { return true, nil - } - falsePred := func(msg *streams.Message) (bool, error) { + }) + falsePred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { return false, nil - } + }) pipe := mocks.NewPipe(t) pipe.ExpectForwardToChild("test", "test", 0) p := streams.NewBranchProcessor([]streams.Predicate{truePred, falsePred}) @@ 
-28,9 +28,9 @@ func TestBranchProcessor_Process(t *testing.T) { } func TestBranchProcessor_ProcessWithError(t *testing.T) { - errPred := func(msg *streams.Message) (bool, error) { + errPred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { return true, errors.New("test") - } + }) pipe := mocks.NewPipe(t) p := streams.NewBranchProcessor([]streams.Predicate{errPred}) p.WithPipe(pipe) @@ -41,9 +41,9 @@ func TestBranchProcessor_ProcessWithError(t *testing.T) { } func TestBranchProcessor_ProcessWithForwardError(t *testing.T) { - pred := func(msg *streams.Message) (bool, error) { + pred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { return true, nil - } + }) pipe := mocks.NewPipe(t) pipe.ExpectForwardToChild("test", "test", 0) pipe.ShouldError() @@ -64,14 +64,15 @@ func TestBranchProcessor_Close(t *testing.T) { } func TestFilterProcessor_Process(t *testing.T) { - pred := func(msg *streams.Message) (bool, error) { + pred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { if _, ok := msg.Key.(string); ok { return true, nil } return false, nil - } + }) pipe := mocks.NewPipe(t) + pipe.ExpectMark(1, 1) pipe.ExpectForward("test", "test") p := streams.NewFilterProcessor(pred) p.WithPipe(pipe) @@ -83,9 +84,9 @@ func TestFilterProcessor_Process(t *testing.T) { } func TestFilterProcessor_ProcessWithError(t *testing.T) { - errPred := func(msg *streams.Message) (bool, error) { + errPred := streams.PredicateFunc(func(msg *streams.Message) (bool, error) { return true, errors.New("test") - } + }) pipe := mocks.NewPipe(t) p := streams.NewFilterProcessor(errPred) p.WithPipe(pipe) @@ -104,9 +105,9 @@ func TestFilterProcessor_Close(t *testing.T) { } func TestMapProcessor_Process(t *testing.T) { - mapper := func(msg *streams.Message) (*streams.Message, error) { + mapper := streams.MapperFunc(func(msg *streams.Message) (*streams.Message, error) { return streams.NewMessage(1, 1), nil - } + }) pipe := mocks.NewPipe(t) 
pipe.ExpectForward(1, 1) p := streams.NewMapProcessor(mapper) @@ -118,9 +119,9 @@ func TestMapProcessor_Process(t *testing.T) { } func TestMapProcessor_ProcessWithError(t *testing.T) { - mapper := func(msg *streams.Message) (*streams.Message, error) { + mapper := streams.MapperFunc(func(msg *streams.Message) (*streams.Message, error) { return nil, errors.New("test") - } + }) pipe := mocks.NewPipe(t) p := streams.NewMapProcessor(mapper) p.WithPipe(pipe) @@ -139,12 +140,12 @@ func TestMapProcessor_Close(t *testing.T) { } func TestFlatMapProcessor_Process(t *testing.T) { - mapper := func(msg *streams.Message) ([]*streams.Message, error) { + mapper := streams.FlatMapperFunc(func(msg *streams.Message) ([]*streams.Message, error) { return []*streams.Message{ streams.NewMessage(1, 1), streams.NewMessage(2, 2), }, nil - } + }) pipe := mocks.NewPipe(t) pipe.ExpectForward(1, 1) pipe.ExpectForward(2, 2) @@ -157,9 +158,9 @@ func TestFlatMapProcessor_Process(t *testing.T) { } func TestFlatMapProcessor_ProcessWithError(t *testing.T) { - mapper := func(msg *streams.Message) ([]*streams.Message, error) { + mapper := streams.FlatMapperFunc(func(msg *streams.Message) ([]*streams.Message, error) { return nil, errors.New("test") - } + }) pipe := mocks.NewPipe(t) p := streams.NewFlatMapProcessor(mapper) p.WithPipe(pipe) @@ -169,13 +170,13 @@ func TestFlatMapProcessor_ProcessWithError(t *testing.T) { assert.Error(t, err) } -func TestFlatMapProcessor_ProcessWithForawrdError(t *testing.T) { - mapper := func(msg *streams.Message) ([]*streams.Message, error) { +func TestFlatMapProcessor_ProcessWithForwardError(t *testing.T) { + mapper := streams.FlatMapperFunc(func(msg *streams.Message) ([]*streams.Message, error) { return []*streams.Message{ streams.NewMessage(1, 1), streams.NewMessage(2, 2), }, nil - } + }) pipe := mocks.NewPipe(t) pipe.ExpectForward(1, 1) pipe.ShouldError() @@ -207,29 +208,6 @@ func TestMergeProcessor_Process(t *testing.T) { pipe.AssertExpectations() } -func 
TestMergeProcessor_ProcessMergesMetadata(t *testing.T) { - src1 := new(MockSource) - src2 := new(MockSource) - pipe := mocks.NewPipe(t) - pipe.ExpectForward(nil, "test") - pipe.ExpectForward("test", "test") - - p := streams.NewMergeProcessor() - p.WithPipe(pipe) - - p.Process(streams.NewMessage(nil, "test").WithMetadata(src1, "test1")) - p.Process(streams.NewMessage("test", "test").WithMetadata(src2, "test2")) - - msgs := pipe.Messages() - assert.Len(t, msgs, 2) - msg := msgs[1].Msg - assert.Len(t, msg.Metadata(), 2) - assert.Equal(t, "test1", msg.Metadata()[src1]) - assert.Equal(t, "test2", msg.Metadata()[src2]) - - pipe.AssertExpectations() -} - func TestMergeProcessor_Close(t *testing.T) { p := streams.NewMergeProcessor() diff --git a/pump.go b/pump.go index 870113c..77f4ec1 100644 --- a/pump.go +++ b/pump.go @@ -5,19 +5,25 @@ import ( "sync" "time" - "github.com/msales/pkg/stats" + "github.com/msales/pkg/v3/stats" ) // Pump represent a Message pump. type Pump interface { - // Process processes a message in the Pump. - Process(*Message) error + sync.Locker + + // Accept takes a message to be processed in the Pump. + Accept(*Message) error + // Stop stops the pump. + Stop() // Close closes the pump. Close() error } // processorPump is an asynchronous Message Pump. 
type processorPump struct { + sync.Mutex + name string processor Processor pipe TimedPipe @@ -47,37 +53,51 @@ func (p *processorPump) run() { p.wg.Add(1) defer p.wg.Done() + tags := []interface{}{"name", p.name} + for msg := range p.ch { p.pipe.Reset() - start := time.Now() - if err := p.processor.Process(msg); err != nil { + p.Lock() + + start := nanotime() + err := p.processor.Process(msg) + if err != nil { + p.Unlock() p.errFn(err) + return } + latency := time.Duration(nanotime()-start) - p.pipe.Duration() + + p.Unlock() - latency := time.Since(start) - p.pipe.Duration() withStats(msg.Ctx, func(s stats.Stats) { - s.Timing("node.latency", latency, 0.1, "name", p.name) - s.Inc("node.throughput", 1, 0.1, "name", p.name) - s.Gauge("node.back-pressure", pressure(p.ch), 0.1, "name", p.name) + s.Timing("node.latency", latency, 0.1, tags...) + s.Inc("node.throughput", 1, 0.1, tags...) + s.Gauge("node.back-pressure", pressure(p.ch), 0.1, tags...) }) } } -// Process processes a message in the Pump. -func (p *processorPump) Process(msg *Message) error { +// Accept takes a message to be processed in the Pump. +func (p *processorPump) Accept(msg *Message) error { p.ch <- msg return nil } -// Close closes the pump. -func (p *processorPump) Close() error { +// Stop stops the pump, but does not close it. +func (p *processorPump) Stop() { close(p.ch) p.wg.Wait() +} +// Close closes the pump. +// +// Stop must be called before closing the pump. 
+func (p *processorPump) Close() error { return p.processor.Close() } @@ -137,12 +157,14 @@ func (p *sourcePump) run() { p.wg.Add(1) defer p.wg.Done() + tags := []interface{}{"name", p.name} + for { select { case <-p.quit: return default: - start := time.Now() + start := nanotime() msg, err := p.source.Consume() if err != nil { @@ -154,14 +176,14 @@ func (p *sourcePump) run() { continue } - latency := time.Since(start) + latency := time.Duration(nanotime() - start) withStats(msg.Ctx, func(s stats.Stats) { - s.Timing("node.latency", latency, 0.1, "name", p.name) - s.Inc("node.throughput", 1, 0.1, "name", p.name) + s.Timing("node.latency", latency, 0.1, tags...) + s.Inc("node.throughput", 1, 0.1, tags...) }) for _, pump := range p.pumps { - err = pump.Process(msg) + err = pump.Accept(msg) if err != nil { go p.errFn(err) return diff --git a/pump_test.go b/pump_test.go index a54d434..a7e9c7d 100644 --- a/pump_test.go +++ b/pump_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - "github.com/msales/pkg/stats" - "github.com/msales/streams" + "github.com/msales/pkg/v3/stats" + "github.com/msales/streams/v2" "github.com/stretchr/testify/assert" ) -func TestProcessorPump_Process(t *testing.T) { +func TestProcessorPump_Accept(t *testing.T) { ctx := stats.WithStats(context.Background(), stats.Null) msg := streams.NewMessageWithContext(ctx, "test", "test") processor := new(MockProcessor) @@ -24,7 +24,7 @@ func TestProcessorPump_Process(t *testing.T) { p := streams.NewPump(node, pipe, func(error) {}) defer p.Close() - err := p.Process(msg) + err := p.Accept(msg) time.Sleep(time.Millisecond) @@ -32,7 +32,7 @@ func TestProcessorPump_Process(t *testing.T) { processor.AssertExpectations(t) } -func TestProcessorPump_ProcessError(t *testing.T) { +func TestProcessorPump_AcceptError(t *testing.T) { var err error msg := streams.NewMessage("test", "test") @@ -48,7 +48,7 @@ func TestProcessorPump_ProcessError(t *testing.T) { }) defer p.Close() - p.Process(msg) + p.Accept(msg) 
time.Sleep(time.Millisecond) @@ -87,7 +87,7 @@ func TestSourcePump_CanConsume(t *testing.T) { source.On("Consume").Maybe().Return(msg, nil) source.On("Close").Return(nil) pump := new(MockPump) - pump.On("Process", msg).Return(nil) + pump.On("Accept", msg).Return(nil) p := streams.NewSourcePump("test", source, []streams.Pump{pump}, func(error) {}) defer p.Close() defer p.Stop() @@ -104,7 +104,7 @@ func TestSourcePump_HandlesPumpError(t *testing.T) { source.On("Consume").Maybe().Return(msg, nil) source.On("Close").Return(nil) pump := new(MockPump) - pump.On("Process", msg).Return(errors.New("test")) + pump.On("Accept", msg).Return(errors.New("test")) p := streams.NewSourcePump("test", source, []streams.Pump{pump}, func(error) { gotError = true }) diff --git a/sql/sink.go b/sql/sink.go index 6d96764..27dff56 100644 --- a/sql/sink.go +++ b/sql/sink.go @@ -3,48 +3,30 @@ package sql import ( "database/sql" "errors" - "time" - "github.com/msales/streams" + "github.com/msales/streams/v2" ) -// TxFunc represents a function that receives a sql transaction. -type TxFunc func(*sql.Tx) error - -// InsertFunc represents a callback to handle processing a Message on the Sink. -type InsertFunc func(*sql.Tx, *streams.Message) error - -// SinkFunc represents a function that configures the Sink. -type SinkFunc func(*Sink) - -// WithBatchMessages configures the number of messages to send in a batch -// on the Sink. -func WithBatchMessages(messages int) SinkFunc { - return func(s *Sink) { - s.batch = messages - } +// Transaction represents a SQL transaction handler. +type Transaction interface { + // Begin handles the start of a SQL transaction. + Begin(*sql.Tx) error + // Commit handles the commit of a SQL transaction. + Commit(*sql.Tx) error } -// WithBatchFrequency configures the frequency to send a batch -// on the Sink. -func WithBatchFrequency(freq time.Duration) SinkFunc { - return func(s *Sink) { - s.freq = freq - } +// Executor represents a SQL query executor. 
+type Executor interface { + // Exec executes a query on the given transaction. + Exec(*sql.Tx, *streams.Message) error } -// WithBeginFn sets the transaction start callback on the Sink. -func WithBeginFn(fn TxFunc) SinkFunc { - return func(s *Sink) { - s.beginFn = fn - } -} +// ExecFunc represents a function implementing an Executor. +type ExecFunc func(*sql.Tx, *streams.Message) error -// WithCommitFn sets the transaction commit callback on the Sink. -func WithCommitFn(fn TxFunc) SinkFunc { - return func(s *Sink) { - s.commitFn = fn - } +// Exec executes a query on the given transaction. +func (fn ExecFunc) Exec(tx *sql.Tx, msg *streams.Message) error { + return fn(tx, msg) } // Sink represents a SQL sink processor. @@ -54,33 +36,28 @@ type Sink struct { db *sql.DB tx *sql.Tx - beginFn TxFunc - insertFn InsertFunc - commitFn TxFunc + exec Executor + txHdlr Transaction - batch int - count int - freq time.Duration - lastCommit time.Time - lastMsg *streams.Message + batch int + count int } // NewSink creates a new batch sql insert sink. 
-func NewSink(db *sql.DB, fn InsertFunc, opts ...SinkFunc) (*Sink, error) { +func NewSink(db *sql.DB, batch int, exec Executor) (*Sink, error) { s := &Sink{ - db: db, - insertFn: fn, - batch: 0, - count: 0, - freq: 0, + db: db, + exec: exec, + batch: batch, + count: 0, } - for _, opt := range opts { - opt(s) + if txHdlr, ok := exec.(Transaction); ok { + s.txHdlr = txHdlr } - if s.batch == 0 && s.freq == 0 { - return nil, errors.New("sink: neither BatchMessages nor BatchFrequency was set") + if s.batch == 0 { + return nil, errors.New("sink: batch must be greater then zero") } return s, nil @@ -97,31 +74,23 @@ func (p *Sink) Process(msg *streams.Message) error { return err } - if err := p.insertFn(p.tx, msg); err != nil { + if err := p.exec.Exec(p.tx, msg); err != nil { return err } - p.lastMsg = msg p.count++ - if p.shouldCommit() { - p.count = 0 - p.lastCommit = time.Now() - return p.commitTransaction(msg) + if p.count >= p.batch { + return p.pipe.Commit(msg) } - return nil + return p.pipe.Mark(msg) } -func (p *Sink) shouldCommit() bool { - if p.batch > 0 && p.count >= p.batch { - return true - } - - if p.freq > 0 && time.Since(p.lastCommit) > p.freq { - return true - } +//Commit commits a processors batch. 
+func (p *Sink) Commit() error { + p.count = 0 - return false + return p.commitTransaction() } func (p *Sink) ensureTransaction() error { @@ -136,41 +105,36 @@ func (p *Sink) ensureTransaction() error { return err } - if p.beginFn != nil { - return p.beginFn(p.tx) - } - - if p.lastCommit.IsZero() { - p.lastCommit = time.Now() + if p.txHdlr != nil { + return p.txHdlr.Begin(p.tx) } return nil } -func (p *Sink) commitTransaction(msg *streams.Message) error { +func (p *Sink) commitTransaction() error { if p.tx == nil { return nil } - if p.commitFn != nil { - if err := p.commitFn(p.tx); err != nil { + if p.txHdlr != nil { + if err := p.txHdlr.Commit(p.tx); err != nil { return err } } if err := p.tx.Commit(); err != nil { - p.tx.Rollback() + _ = p.tx.Rollback() return err } p.tx = nil - - return p.pipe.Commit(msg) + return nil } // Close closes the processor. func (p *Sink) Close() error { - if err := p.commitTransaction(p.lastMsg); err != nil { - return err + if p.tx != nil { + _ = p.tx.Rollback() } return p.db.Close() diff --git a/sql/sink_test.go b/sql/sink_test.go index d4e4ff7..c945274 100644 --- a/sql/sink_test.go +++ b/sql/sink_test.go @@ -1,311 +1,379 @@ package sql_test import ( - sql2 "database/sql" + "database/sql" + "errors" "testing" - "time" - "github.com/msales/streams" - "github.com/msales/streams/mocks" - "github.com/msales/streams/sql" - "github.com/pkg/errors" + "github.com/msales/streams/v2" + "github.com/msales/streams/v2/mocks" + sqlx "github.com/msales/streams/v2/sql" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "gopkg.in/DATA-DOG/go-sqlmock.v1" ) -func TestNewSink(t *testing.T) { - db, _, err := sqlmock.New() +func newDB(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { + db, m, err := sqlmock.New() assert.NoError(t, err) - defer db.Close() - s, err := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { + return db, m +} + +func TestExecFunc_ImplementsExecutor(t *testing.T) { + exec := sqlx.ExecFunc(func(*sql.Tx, 
*streams.Message) error { return nil - }, sql.WithBatchMessages(1)) + }) - assert.NoError(t, err) - assert.IsType(t, &sql.Sink{}, s) + assert.Implements(t, (*sqlx.Executor)(nil), exec) } -func TestNewSinkMustHaveBatchOrFrequency(t *testing.T) { - db, _, err := sqlmock.New() +func TestExecFunc_Exec(t *testing.T) { + called := false + exec := sqlx.ExecFunc(func(*sql.Tx, *streams.Message) error { + called = true + return nil + }) + + err := exec.Exec(nil, nil) + assert.NoError(t, err) - defer db.Close() + assert.True(t, called) +} - _, err = sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil +func TestExecFunc_ExecError(t *testing.T) { + exec := sqlx.ExecFunc(func(*sql.Tx, *streams.Message) error { + return errors.New("test") }) + err := exec.Exec(nil, nil) + assert.Error(t, err) } -func TestSink_Process(t *testing.T) { - insertCalled := false +func TestNewSink(t *testing.T) { + db, _ := newDB(t) + defer db.Close() + + exec := new(MockExecutor) + + s, err := sqlx.NewSink(db, 1, exec) - db, mock, err := sqlmock.New() assert.NoError(t, err) + assert.IsType(t, &sqlx.Sink{}, s) +} + +func TestNewSinkMustHaveBatch(t *testing.T) { + db, _ := newDB(t) + defer db.Close() + + exec := new(MockExecutor) + + _, err := sqlx.NewSink(db, 0, exec) + + assert.Error(t, err) +} + +func TestSink_Process(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() - mock.ExpectBegin() + exec := new(MockExecutor) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - insertCalled = true - return nil - }, sql.WithBatchMessages(10)) + pipe.ExpectMark("test", "test") + + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + msg := streams.NewMessage("test", "test") + + err := s.Process(msg) assert.NoError(t, err) - assert.True(t, insertCalled) - if err := mock.ExpectationsWereMet(); err != nil { + 
exec.AssertCalled(t, "Exec", mock.Anything, msg) + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } func TestSink_ProcessWithTxError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin().WillReturnError(errors.New("test error")) - mock.ExpectBegin().WillReturnError(errors.New("test error")) - + exec := new(MockExecutor) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(10)) + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Process(streams.NewMessage("test", "test")) assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } func TestSink_ProcessWithInsertError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() - mock.ExpectBegin() + exec := new(MockExecutor) + exec.On("Exec", mock.Anything, mock.Anything).Return(errors.New("test")) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return errors.New("test error") - }, sql.WithBatchMessages(10)) + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Process(streams.NewMessage("test", "test")) assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %s", err) - } } -func TestSink_ProcessWithTxFuncs(t *testing.T) { - beginCalled := false - insertCalled := false - commitCalled := false - - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_ProcessWithTxBegin(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + 
dbMock.ExpectBegin() - mock.ExpectBegin() - mock.ExpectCommit() + exec := new(MockExecutorTx) + exec.On("Begin", mock.Anything).Return(nil) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - pipe.ExpectCommit() + pipe.ExpectMark("test", "test") - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - insertCalled = true - return nil - }, sql.WithBatchMessages(1), sql.WithBeginFn(func(tx *sql2.Tx) error { - beginCalled = true - return nil - }), sql.WithCommitFn(func(tx *sql2.Tx) error { - commitCalled = true - return nil - })) + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Process(streams.NewMessage("test", "test")) assert.NoError(t, err) - assert.True(t, beginCalled) - assert.True(t, insertCalled) - assert.True(t, commitCalled) - pipe.AssertExpectations() - if err := mock.ExpectationsWereMet(); err != nil { + exec.AssertExpectations(t) + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } -func TestSink_ProcessWithBeginFuncError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_ProcessWithTxBeginError(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() - mock.ExpectBegin() + exec := new(MockExecutorTx) + exec.On("Begin", mock.Anything).Return(errors.New("test")) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(10), sql.WithBeginFn(func(tx *sql2.Tx) error { - return errors.New("test error") - })) + s, _ := sqlx.NewSink(db, 1, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Process(streams.NewMessage("test", "test")) assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %s", err) - } } -func TestSink_ProcessWithCommitFuncError(t 
*testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_ProcessWithMessageCommit(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() - mock.ExpectBegin() + exec := new(MockExecutor) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(1), sql.WithCommitFn(func(tx *sql2.Tx) error { - return errors.New("test error") - })) + pipe.ExpectMark("test", "test") + pipe.ExpectCommit() + + s, _ := sqlx.NewSink(db, 2, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) + _ = s.Process(streams.NewMessage("test", "test")) + err := s.Process(streams.NewMessage("test1", "test1")) - assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %s", err) - } + assert.NoError(t, err) + pipe.AssertExpectations() } -func TestSink_ProcessWithMessageCommit(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_Commit(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() + dbMock.ExpectCommit() - mock.ExpectBegin() - mock.ExpectCommit() + exec := new(MockExecutor) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - pipe.ExpectCommit() + pipe.ExpectMark(mocks.Anything, mocks.Anything) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(2)) + s, _ := sqlx.NewSink(db, 2, exec) s.WithPipe(pipe) + _ = s.Process(streams.NewMessage("test1", "test1")) - s.Process(streams.NewMessage("test", "test")) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Commit() assert.NoError(t, err) - pipe.AssertExpectations() - if err := mock.ExpectationsWereMet(); err != nil { + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled 
expectations: %s", err) } } -func TestSink_ProcessWithFrequencyCommit(t *testing.T) { - db, mock, err := sqlmock.New() +func TestSink_CommitNoTransaction(t *testing.T) { + db, _ := newDB(t) + defer db.Close() + + exec := new(MockExecutor) + pipe := mocks.NewPipe(t) + s, _ := sqlx.NewSink(db, 2, exec) + s.WithPipe(pipe) + + err := s.Commit() + assert.NoError(t, err) +} + +func TestSink_CommitTxCommit(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() + dbMock.ExpectCommit() - mock.ExpectBegin() - mock.ExpectCommit() + exec := new(MockExecutorTx) + exec.On("Begin", mock.Anything).Return(nil) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) + exec.On("Commit", mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - pipe.ExpectCommit() + pipe.ExpectMark(mocks.Anything, mocks.Anything) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchFrequency(time.Millisecond)) + s, _ := sqlx.NewSink(db, 2, exec) s.WithPipe(pipe) + _ = s.Process(streams.NewMessage("test1", "test1")) - s.Process(streams.NewMessage("test", "test")) - time.Sleep(time.Millisecond) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Commit() assert.NoError(t, err) - pipe.AssertExpectations() - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %s", err) - } + exec.AssertExpectations(t) } -func TestSink_ProcessWithCommitError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_CommitTxCommitError(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() - mock.ExpectBegin() - mock.ExpectCommit().WillReturnError(errors.New("test error")) + exec := new(MockExecutorTx) + exec.On("Begin", mock.Anything).Return(nil) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) + exec.On("Commit", mock.Anything).Return(errors.New("test")) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, 
*streams.Message) error { - return nil - }, sql.WithBatchMessages(1)) + pipe.ExpectMark(mocks.Anything, mocks.Anything) + + s, _ := sqlx.NewSink(db, 2, exec) + s.WithPipe(pipe) + _ = s.Process(streams.NewMessage("test1", "test1")) + + err := s.Commit() + + assert.Error(t, err) +} + +func TestSink_CommitDBError(t *testing.T) { + db, dbMock := newDB(t) + defer db.Close() + dbMock.ExpectBegin() + dbMock.ExpectCommit().WillReturnError(errors.New("test error")) + + exec := new(MockExecutorTx) + exec.On("Begin", mock.Anything).Return(nil) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) + exec.On("Commit", mock.Anything).Return(nil) + + pipe := mocks.NewPipe(t) + pipe.ExpectMark(mocks.Anything, mocks.Anything) + + s, _ := sqlx.NewSink(db, 2, exec) s.WithPipe(pipe) + _ = s.Process(streams.NewMessage("test1", "test1")) - err = s.Process(streams.NewMessage("test", "test")) + err := s.Commit() assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } func TestSink_Close(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectClose() - mock.ExpectClose() - + exec := new(MockExecutor) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(10)) + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Close() + err := s.Close() assert.NoError(t, err) - if err := mock.ExpectationsWereMet(); err != nil { + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } -func TestSink_CloseWithTxError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.NoError(t, err) +func TestSink_CloseWithTxRollback(t *testing.T) { + db, dbMock := newDB(t) defer db.Close() + dbMock.ExpectBegin() + dbMock.ExpectRollback() + dbMock.ExpectClose() 
- mock.ExpectBegin() - mock.ExpectCommit().WillReturnError(errors.New("test error")) + exec := new(MockExecutor) + exec.On("Exec", mock.Anything, mock.Anything).Return(nil) pipe := mocks.NewPipe(t) - s, _ := sql.NewSink(db, func(*sql2.Tx, *streams.Message) error { - return nil - }, sql.WithBatchMessages(10)) + pipe.ExpectMark(mocks.Anything, mocks.Anything) + + s, _ := sqlx.NewSink(db, 10, exec) s.WithPipe(pipe) - err = s.Process(streams.NewMessage("test", "test")) - assert.NoError(t, err) + _ = s.Process(streams.NewMessage("test", "test")) - err = s.Close() + err := s.Close() - assert.Error(t, err) - if err := mock.ExpectationsWereMet(); err != nil { + assert.NoError(t, err) + if err := dbMock.ExpectationsWereMet(); err != nil { t.Errorf("There were unfulfilled expectations: %s", err) } } + +type MockExecutor struct { + mock.Mock +} + +func (m *MockExecutor) Exec(tx *sql.Tx, msg *streams.Message) error { + args := m.Called(tx, msg) + return args.Error(0) +} + +type MockExecutorTx struct { + mock.Mock +} + +func (m *MockExecutorTx) Begin(tx *sql.Tx) error { + args := m.Called(tx) + return args.Error(0) +} + +func (m *MockExecutorTx) Exec(tx *sql.Tx, msg *streams.Message) error { + args := m.Called(tx, msg) + return args.Error(0) +} + +func (m *MockExecutorTx) Commit(tx *sql.Tx) error { + args := m.Called(tx) + return args.Error(0) +} diff --git a/stream.go b/stream.go index b0e8c31..6ff191b 100644 --- a/stream.go +++ b/stream.go @@ -20,7 +20,7 @@ func (sb *StreamBuilder) Source(name string, source Source) *Stream { } // Build builds the stream Topology. -func (sb *StreamBuilder) Build() *Topology { +func (sb *StreamBuilder) Build() (*Topology, []error) { return sb.tp.Build() } @@ -37,7 +37,7 @@ func newStream(tp *TopologyBuilder, parents []Node) *Stream { } } -// Filter filters the stream using a Predicate. +// Filter filters the stream using a predicate. 
func (s *Stream) Filter(name string, pred Predicate) *Stream { p := NewFilterProcessor(pred) n := s.tp.AddProcessor(name, p, s.parents) @@ -45,7 +45,12 @@ func (s *Stream) Filter(name string, pred Predicate) *Stream { return newStream(s.tp, []Node{n}) } -// Branch branches a stream based in the given Predcates. +// FilterFunc filters the stream using a predicate. +func (s *Stream) FilterFunc(name string, pred PredicateFunc) *Stream { + return s.Filter(name, pred) +} + +// Branch branches a stream based on the given predicates. func (s *Stream) Branch(name string, preds ...Predicate) []*Stream { p := NewBranchProcessor(preds) n := s.tp.AddProcessor(name, p, s.parents) @@ -57,7 +62,17 @@ func (s *Stream) Branch(name string, preds ...Predicate) []*Stream { return streams } -// Map runs a Mapper on the stream. +// BranchFunc branches a stream based on the given predicates. +func (s *Stream) BranchFunc(name string, preds ...PredicateFunc) []*Stream { + ps := make([]Predicate, len(preds)) + for i, fn := range preds { + ps[i] = fn + } + + return s.Branch(name, ps...) +} + +// Map runs a mapper on the stream. func (s *Stream) Map(name string, mapper Mapper) *Stream { p := NewMapProcessor(mapper) n := s.tp.AddProcessor(name, p, s.parents) @@ -65,6 +80,11 @@ func (s *Stream) Map(name string, mapper Mapper) *Stream { return newStream(s.tp, []Node{n}) } +// MapFunc runs a mapper on the stream. +func (s *Stream) MapFunc(name string, mapper MapperFunc) *Stream { + return s.Map(name, mapper) +} + // FlatMap runs a flat mapper on the stream. func (s *Stream) FlatMap(name string, mapper FlatMapper) *Stream { p := NewFlatMapProcessor(mapper) @@ -73,6 +93,11 @@ func (s *Stream) FlatMap(name string, mapper FlatMapper) *Stream { return newStream(s.tp, []Node{n}) } +// FlatMapFunc runs a flat mapper on the stream. +func (s *Stream) FlatMapFunc(name string, mapper FlatMapperFunc) *Stream { + return s.FlatMap(name, mapper) +} + // Merge merges one or more streams into this stream. 
func (s *Stream) Merge(name string, streams ...*Stream) *Stream { parents := []Node{} diff --git a/stream_internal_test.go b/stream_internal_test.go index d7daab8..42a6d0d 100644 --- a/stream_internal_test.go +++ b/stream_internal_test.go @@ -23,8 +23,9 @@ func TestStreamBuilder_Build(t *testing.T) { builder := NewStreamBuilder() builder.Source("test", source).Process("test1", proc) - top := builder.Build() + top, errs := builder.Build() + assert.Len(t, errs, 0) assert.Len(t, top.Sources(), 1) assert.Len(t, top.Processors(), 1) assert.Contains(t, top.Sources(), source) @@ -34,9 +35,25 @@ func TestStream_Filter(t *testing.T) { source := &streamSource{} builder := NewStreamBuilder() - stream := builder.Source("source", source).Filter("test", func(msg *Message) (bool, error) { - return true, nil - }) + stream := builder.Source("source", source). + Filter("test", PredicateFunc(func(msg *Message) (bool, error) { + return true, nil + })) + + assert.Len(t, stream.parents, 1) + assert.IsType(t, &ProcessorNode{}, stream.parents[0]) + assert.Equal(t, stream.parents[0].(*ProcessorNode).name, "test") + assert.IsType(t, &FilterProcessor{}, stream.parents[0].(*ProcessorNode).processor) +} + +func TestStream_FilterFunc(t *testing.T) { + source := &streamSource{} + builder := NewStreamBuilder() + + stream := builder.Source("source", source). 
+ FilterFunc("test", func(msg *Message) (bool, error) { + return true, nil + }) assert.Len(t, stream.parents, 1) assert.IsType(t, &ProcessorNode{}, stream.parents[0]) @@ -49,6 +66,31 @@ func TestStream_Branch(t *testing.T) { builder := NewStreamBuilder() streams := builder.Source("source", source).Branch( + "test", + PredicateFunc(func(msg *Message) (bool, error) { + return true, nil + }), + PredicateFunc(func(msg *Message) (bool, error) { + return true, nil + }), + ) + + assert.Len(t, streams, 2) + assert.Len(t, streams[0].parents, 1) + assert.IsType(t, &ProcessorNode{}, streams[0].parents[0]) + assert.Equal(t, streams[0].parents[0].(*ProcessorNode).name, "test") + assert.IsType(t, &BranchProcessor{}, streams[0].parents[0].(*ProcessorNode).processor) + assert.Len(t, streams[1].parents, 1) + assert.IsType(t, &ProcessorNode{}, streams[1].parents[0]) + assert.Equal(t, streams[1].parents[0].(*ProcessorNode).name, "test") + assert.IsType(t, &BranchProcessor{}, streams[1].parents[0].(*ProcessorNode).processor) +} + +func TestStream_BranchFunc(t *testing.T) { + source := &streamSource{} + builder := NewStreamBuilder() + + streams := builder.Source("source", source).BranchFunc( "test", func(msg *Message) (bool, error) { return true, nil @@ -74,7 +116,22 @@ func TestStream_Map(t *testing.T) { builder := NewStreamBuilder() stream := builder.Source("source", source). - Map("test", func(msg *Message) (*Message, error) { + Map("test", MapperFunc(func(msg *Message) (*Message, error) { + return nil, nil + })) + + assert.Len(t, stream.parents, 1) + assert.IsType(t, &ProcessorNode{}, stream.parents[0]) + assert.Equal(t, stream.parents[0].(*ProcessorNode).name, "test") + assert.IsType(t, &MapProcessor{}, stream.parents[0].(*ProcessorNode).processor) +} + +func TestStream_MapFunc(t *testing.T) { + source := &streamSource{} + builder := NewStreamBuilder() + + stream := builder.Source("source", source). 
+ MapFunc("test", func(msg *Message) (*Message, error) { return nil, nil }) @@ -89,7 +146,22 @@ func TestStream_FlatMap(t *testing.T) { builder := NewStreamBuilder() stream := builder.Source("source", source). - FlatMap("test", func(msg *Message) ([]*Message, error) { + FlatMap("test", FlatMapperFunc(func(msg *Message) ([]*Message, error) { + return nil, nil + })) + + assert.Len(t, stream.parents, 1) + assert.IsType(t, &ProcessorNode{}, stream.parents[0]) + assert.Equal(t, stream.parents[0].(*ProcessorNode).name, "test") + assert.IsType(t, &FlatMapProcessor{}, stream.parents[0].(*ProcessorNode).processor) +} + +func TestStream_FlatMapFunc(t *testing.T) { + source := &streamSource{} + builder := NewStreamBuilder() + + stream := builder.Source("source", source). + FlatMapFunc("test", func(msg *Message) ([]*Message, error) { return nil, nil }) diff --git a/supervisor.go b/supervisor.go new file mode 100644 index 0000000..a31c390 --- /dev/null +++ b/supervisor.go @@ -0,0 +1,266 @@ +package streams + +import ( + "errors" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/msales/pkg/v3/syncx" +) + +const ( + stopped uint32 = iota + running +) + +var ( + // ErrNotRunning is returned when trying to perform an action that requires a running supervisor. + ErrNotRunning = errors.New("streams: supervisor not running") + // ErrAlreadyRunning is returned when starting a supervisor that has already been started. + ErrAlreadyRunning = errors.New("streams: supervisor already running") + // ErrUnknownPump is returned when the supervisor is unable to find a pump for a given processor. + ErrUnknownPump = errors.New("streams: encountered an unknown pump") +) + +// NopLocker is a no-op implementation of Locker interface. +type nopLocker struct{} + +// Lock performs no action. +func (*nopLocker) Lock() {} + +// Unlock performs no action. +func (*nopLocker) Unlock() {} + + +// Supervisor represents a concurrency-safe stream supervisor. 
+// +// The Supervisor performs a commit in a concurrently-safe manner. +// There can only ever be 1 ongoing commit at any given time. +type Supervisor interface { + io.Closer + + // WithPumps sets a map of Pumps. + WithPumps(pumps map[Node]Pump) + + // Start starts the supervisor. + // + // This function should initiate all the background tasks of the Supervisor. + // It must not be a blocking call. + Start() error + + // Commit performs a global commit sequence. + // + // If triggered by a Pipe, the associated Processor should be passed. + Commit(Processor) error +} + +type supervisor struct { + store Metastore + + pumps map[Processor]Pump + + commitMu syncx.Mutex +} + +// NewSupervisor returns a new Supervisor instance. +func NewSupervisor(store Metastore) Supervisor { + return &supervisor{ + store: store, + } +} + +// Start starts the supervisor. +// +// This function should initiate all the background tasks of the Supervisor. +// It must not be a blocking call. +func (s *supervisor) Start() error { + return nil +} + +// WithPumps sets a map of Pumps. +func (s *supervisor) WithPumps(pumps map[Node]Pump) { + mapped := make(map[Processor]Pump, len(pumps)) + for node, pump := range pumps { + mapped[node.Processor()] = pump + } + + s.pumps = mapped +} + +// Commit performs a global commit sequence. +// +// If triggered by a Pipe, the associated Processor should be passed. +func (s *supervisor) Commit(caller Processor) error { + if !s.commitMu.TryLock() { + // We are already committing. 
+ return nil + } + defer s.commitMu.Unlock() + + metadata, err := s.store.PullAll() + if err != nil { + return err + } + + var metaItems Metaitems + for proc, items := range metadata { + if comm, ok := proc.(Committer); ok { + newItems, err := s.commit(caller, comm) + if err != nil { + return err + } + + items = items.Update(newItems) + } + + metaItems = metaItems.Merge(items) + } + + for _, item := range metaItems { + if item.Source == nil { + continue + } + + err := item.Source.Commit(item.Metadata) + if err != nil { + return err + } + } + + return nil +} + +func (s *supervisor) commit(caller Processor, comm Committer) (Metaitems, error) { + locker, err := s.getLocker(caller, comm) + if err != nil { + return nil, err + } + + locker.Lock() + defer locker.Unlock() + + err = comm.Commit() + if err != nil { + return nil, err + } + + // Pull metadata of messages that have been processed between the initial pull and the lock. + return s.store.Pull(comm) +} + +func (s *supervisor) getLocker(caller, proc Processor) (sync.Locker, error) { + if caller == proc { + return &nopLocker{}, nil + } + + pump, ok := s.pumps[proc] + if !ok { + return nil, ErrUnknownPump + } + + return pump, nil +} + +// Permanently locks the supervisor, ensuring that no commit will ever be executed. +func (s *supervisor) Close() error { + s.commitMu.Lock() + + return nil +} + +type timedSupervisor struct { + inner Supervisor + d time.Duration + errFn ErrorFunc + + t *time.Ticker + resetCh chan struct{} + running uint32 +} + +// NewTimedSupervisor returns a supervisor that commits automatically. +func NewTimedSupervisor(inner Supervisor, d time.Duration, errFn ErrorFunc) Supervisor { + return &timedSupervisor{ + inner: inner, + d: d, + errFn: errFn, + resetCh: make(chan struct{}, 1), + } +} + +// WithPumps sets a map of Pumps. +func (s *timedSupervisor) WithPumps(pumps map[Node]Pump) { + s.inner.WithPumps(pumps) +} + +// Start starts the supervisor. 
+// +// This function should initiate all the background tasks of the Supervisor. +// It must not be a blocking call. +func (s *timedSupervisor) Start() error { + if !s.setRunning() { + return ErrAlreadyRunning + } + + s.t = time.NewTicker(s.d) + + go func() { + for { + select { + case <-s.t.C: + err := s.inner.Commit(nil) + if err != nil { + s.errFn(err) + } + + case <-s.resetCh: + s.t.Stop() + s.t = time.NewTicker(s.d) + } + } + }() + + return s.inner.Start() +} + +// Close stops the timer and closes the inner supervisor. +func (s *timedSupervisor) Close() error { + if !s.setStopped() { + return ErrNotRunning + } + + s.t.Stop() + + return s.inner.Close() +} + +// Commit performs a global commit sequence. +// +// If triggered by a Pipe, the associated Processor should be passed. +func (s *timedSupervisor) Commit(caller Processor) error { + if !s.isRunning() { + return ErrNotRunning + } + + err := s.inner.Commit(caller) + if err != nil { + return err + } + s.resetCh <- struct{}{} + + return nil +} + +func (s *timedSupervisor) setRunning() bool { + return atomic.CompareAndSwapUint32(&s.running, stopped, running) +} + +func (s *timedSupervisor) isRunning() bool { + return running == atomic.LoadUint32(&s.running) +} + +func (s *timedSupervisor) setStopped() bool { + return atomic.CompareAndSwapUint32(&s.running, running, stopped) +} diff --git a/supervisor_internal_test.go b/supervisor_internal_test.go new file mode 100644 index 0000000..47691b4 --- /dev/null +++ b/supervisor_internal_test.go @@ -0,0 +1,87 @@ +package streams + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSupervisor_WithPumps(t *testing.T) { + supervisor := &supervisor{} + + pump := &fakePump{} + processor := &fakeProcessor{} + node := newMockNode(processor) + + input := map[Node]Pump{ + node: pump, + } + + expected := map[Processor]Pump{ + processor: pump, + } + + supervisor.WithPumps(input) + + assert.Equal(t, expected, supervisor.pumps) +} + +func 
TestSupervisor_Commit_CommitPending(t *testing.T) { + supervisor := &supervisor{} + supervisor.commitMu.Lock() + + err := supervisor.Commit(nil) + + assert.NoError(t, err) +} + +type fakePump struct{} + +func (*fakePump) Lock() {} + +func (*fakePump) Unlock() {} + +func (*fakePump) Accept(*Message) error { + return nil +} + +func (*fakePump) Stop() { +} + +func (*fakePump) Close() error { + return nil +} + +type fakeProcessor struct{} + +func (*fakeProcessor) WithPipe(Pipe) {} + +func (*fakeProcessor) Process(*Message) error { + return nil +} + +func (*fakeProcessor) Close() error { + return nil +} + +type fakeNode struct { + p Processor +} + +func newMockNode(p Processor) *fakeNode { + return &fakeNode{p: p} +} + +func (*fakeNode) Name() string { + return "" +} + +func (*fakeNode) AddChild(n Node) {} + +func (*fakeNode) Children() []Node { + return nil +} + +func (n *fakeNode) Processor() Processor { + return n.p +} diff --git a/supervisor_test.go b/supervisor_test.go new file mode 100644 index 0000000..49542e5 --- /dev/null +++ b/supervisor_test.go @@ -0,0 +1,468 @@ +package streams_test + +import ( + "errors" + "testing" + "time" + + "github.com/msales/streams/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestNewSupervisor(t *testing.T) { + supervisor := streams.NewSupervisor(nil) + + assert.Implements(t, (*streams.Supervisor)(nil), supervisor) +} + +func TestSupervisor_Commit(t *testing.T) { + src1 := source(nil) + src2 := source(nil) + + comm := committer(nil) + proc := new(MockProcessor) + + pump1 := pump() + pump2 := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: { + {Source: src1, Metadata: metadata()}, + {Source: src2, Metadata: metadata()}, + }, + proc: { + {Source: src1, Metadata: metadata()}, + }, + } + + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + store.On("Pull", comm).Return(streams.Metaitems{{Source: src1, Metadata: metadata()}}, nil) + + pumps := 
map[streams.Node]streams.Pump{ + node(comm): pump1, + node(proc): pump2, + } + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.NoError(t, err) + src1.AssertCalled(t, "Commit", mock.Anything) + src2.AssertCalled(t, "Commit", mock.Anything) + store.AssertCalled(t, "Pull", comm) + pump1.AssertCalled(t, "Lock", mock.Anything) + pump1.AssertCalled(t, "Unlock", mock.Anything) + pump2.AssertNotCalled(t, "Lock", mock.Anything) + pump2.AssertNotCalled(t, "Unlock", mock.Anything) +} + +func TestSupervisor_Commit_WithCaller(t *testing.T) { + src := source(nil) + comm := committer(nil) + pump := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: src, Metadata: metadata()}}, + } + + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + store.On("Pull", comm).Return(nil, nil) + + pumps := map[streams.Node]streams.Pump{node(comm): pump} + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(comm) + + assert.NoError(t, err) + src.AssertCalled(t, "Commit", mock.Anything) + pump.AssertNotCalled(t, "Lock", mock.Anything) + pump.AssertNotCalled(t, "Unlock", mock.Anything) +} + +func TestSupervisor_Commit_NullSource(t *testing.T) { + comm := committer(nil) + pump := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: nil, Metadata: nil}}, + } + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + store.On("Pull", comm).Return(nil, nil) + + pumps := map[streams.Node]streams.Pump{node(comm): pump} + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.NoError(t, err) + pump.AssertCalled(t, "Lock", mock.Anything) + pump.AssertCalled(t, "Unlock", mock.Anything) +} + +func TestSupervisor_Commit_PullAllError(t *testing.T) { + store := new(MockMetastore) + store.On("PullAll").Return(nil, errors.New("error")) + + 
supervisor := streams.NewSupervisor(store) + + err := supervisor.Commit(nil) + + assert.Error(t, err) +} + +func TestSupervisor_Commit_PullError(t *testing.T) { + src := source(nil) + comm := committer(nil) + pump := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: src, Metadata: metadata()}}, + } + + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + store.On("Pull", comm).Return(nil, errors.New("error")) + + pumps := map[streams.Node]streams.Pump{node(comm): pump} + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.Error(t, err) + src.AssertNotCalled(t, "Commit", mock.Anything) + pump.AssertCalled(t, "Lock", mock.Anything) + pump.AssertCalled(t, "Unlock", mock.Anything) +} + +func TestSupervisor_Commit_UnknownPump(t *testing.T) { + src := source(nil) + comm := committer(nil) + + pumps := map[streams.Node]streams.Pump{} + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: src, Metadata: metadata()}}, + } + + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.Error(t, err) + src.AssertNotCalled(t, "Commit", mock.Anything) +} + +func TestSupervisor_Commit_CommitterError(t *testing.T) { + src := source(nil) + comm := committer(errors.New("error")) + pump := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: src, Metadata: metadata()}}, + } + + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + + pumps := map[streams.Node]streams.Pump{node(comm): pump} + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.Error(t, err) + src.AssertNotCalled(t, "Commit", mock.Anything) + pump.AssertCalled(t, "Lock", mock.Anything) + pump.AssertCalled(t, "Unlock", mock.Anything) +} + +func 
TestSupervisor_Commit_SourceError(t *testing.T) { + src := source(errors.New("error")) + comm := committer(nil) + pump := pump() + + meta := map[streams.Processor]streams.Metaitems{ + comm: {{Source: src, Metadata: metadata()}}, + } + store := new(MockMetastore) + store.On("PullAll").Return(meta, nil) + store.On("Pull", comm).Return(nil, nil) + + pumps := map[streams.Node]streams.Pump{node(comm): pump} + + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(pumps) + + err := supervisor.Commit(nil) + + assert.Error(t, err) + pump.AssertCalled(t, "Lock", mock.Anything) + pump.AssertCalled(t, "Unlock", mock.Anything) +} + +func BenchmarkSupervisor_Commit(b *testing.B) { + p := &fakeCommitter{} + src := &fakeSource{} + meta := &fakeMetadata{} + store := &fakeMetastore{Metadata: map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: meta}}}} + node := &fakeNode{Proc: p} + pump := &fakePump{} + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(map[streams.Node]streams.Pump{node: pump}) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = supervisor.Commit(p) + } +} + +func BenchmarkSupervisor_CommitGlobal(b *testing.B) { + p := &fakeCommitter{} + src := &fakeSource{} + meta := &fakeMetadata{} + store := &fakeMetastore{Metadata: map[streams.Processor]streams.Metaitems{p: {{Source: src, Metadata: meta}}}} + node := &fakeNode{Proc: p} + pump := &fakePump{} + supervisor := streams.NewSupervisor(store) + supervisor.WithPumps(map[streams.Node]streams.Pump{node: pump}) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = supervisor.Commit(nil) + } +} + +func TestNewTimedSupervisor(t *testing.T) { + supervisor := streams.NewTimedSupervisor(nil, 0, nil) + + assert.Implements(t, (*streams.Supervisor)(nil), supervisor) +} + +func TestTimedSupervisor_WithPumps(t *testing.T) { + pumps := map[streams.Node]streams.Pump{} + inner := new(MockSupervisor) + inner.On("WithPumps", pumps).Return() + + 
supervisor := streams.NewTimedSupervisor(inner, 0, nil) + + supervisor.WithPumps(pumps) + + inner.AssertCalled(t, "WithPumps", pumps) +} + +func TestTimedSupervisor_Start(t *testing.T) { + wantErr := errors.New("error") + inner := new(MockSupervisor) + inner.On("Commit", nil).Return(nil) + inner.On("Start").Return(wantErr) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + err := supervisor.Start() + + inner.AssertCalled(t, "Start") + assert.Equal(t, wantErr, err) +} + +func TestTimedSupervisor_GlobalCommitSourceError(t *testing.T) { + inner := new(MockSupervisor) + inner.On("Start").Return(nil) + inner.On("Commit", nil).Return(errors.New("error")) + inner.On("Close").Return(nil) + + called := false + supervisor := streams.NewTimedSupervisor(inner, 1, func(err error) { + assert.Equal(t, "error", err.Error()) + called = true + }) + _ = supervisor.Start() + defer supervisor.Close() + + time.Sleep(time.Millisecond) + + inner.AssertCalled(t, "Commit", nil) + assert.True(t, called, "Expected error function to be called") +} + +func TestTimedSupervisor_Start_AlreadyRunning(t *testing.T) { + inner := new(MockSupervisor) + inner.On("Commit", nil).Return(nil) + inner.On("Start").Return(nil) + inner.On("Close").Return(nil) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + err := supervisor.Start() + defer supervisor.Close() + + inner.AssertCalled(t, "Start") + assert.NoError(t, err) + + err = supervisor.Start() + + assert.Error(t, err) +} + +func TestTimedSupervisor_Close(t *testing.T) { + inner := new(MockSupervisor) + inner.On("Commit", nil).Return(nil) + inner.On("Start").Return(nil) + inner.On("Close").Return(nil) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + _ = supervisor.Start() + defer supervisor.Close() + + err := supervisor.Close() + + inner.AssertCalled(t, "Close") + assert.NoError(t, err) +} + +func TestTimedSupervisor_Close_NotRunning(t *testing.T) { + inner := new(MockSupervisor) + inner.On("Commit", nil).Return(nil) + 
inner.On("Close").Return(nil) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + + err := supervisor.Close() + + assert.Error(t, err) +} + +func TestTimedSupervisor_Close_WithError(t *testing.T) { + wantErr := errors.New("error") + inner := new(MockSupervisor) + inner.On("Start").Return(nil) + inner.On("Close").Return(wantErr) + + supervisor := streams.NewTimedSupervisor(inner, 1*time.Second, nil) + _ = supervisor.Start() + defer supervisor.Close() + + err := supervisor.Close() + + assert.Equal(t, wantErr, err) + inner.AssertCalled(t, "Close") +} + +func TestTimedSupervisor_Commit(t *testing.T) { + caller := new(MockProcessor) + inner := new(MockSupervisor) + inner.On("Start").Return(nil) + inner.On("Commit", nil).Return(nil) + inner.On("Commit", caller).Return(nil) + inner.On("Close").Return(nil) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + _ = supervisor.Start() + defer supervisor.Close() + + err := supervisor.Commit(caller) + + assert.NoError(t, err) + inner.AssertCalled(t, "Commit", caller) +} + +func TestTimedSupervisor_CommitResetsTimer(t *testing.T) { + caller := new(MockProcessor) + inner := new(MockSupervisor) + inner.On("Start").Return(nil) + inner.On("Commit", mock.Anything).Return(nil) + inner.On("Close").Return(nil) + + supervisor := streams.NewTimedSupervisor(inner, 10*time.Millisecond, nil) + _ = supervisor.Start() + defer supervisor.Close() + + time.Sleep(5 * time.Millisecond) + + _ = supervisor.Commit(caller) + + time.Sleep(5 * time.Millisecond) + + inner.AssertNumberOfCalls(t, "Commit", 1) +} + +func TestTimedSupervisor_CommitSourceError(t *testing.T) { + wantErr := errors.New("error") + caller := new(MockProcessor) + inner := new(MockSupervisor) + inner.On("Start").Return(nil) + inner.On("Commit", caller).Return(wantErr) + + supervisor := streams.NewTimedSupervisor(inner, 1*time.Second, nil) + err := supervisor.Start() + + assert.NoError(t, err) + + err = supervisor.Commit(caller) + + inner.AssertCalled(t, "Commit", 
caller) + assert.Equal(t, wantErr, err) +} + +func TestTimedSupervisor_Commit_NotRunning(t *testing.T) { + caller := new(MockProcessor) + inner := new(MockSupervisor) + + supervisor := streams.NewTimedSupervisor(inner, 1, nil) + + err := supervisor.Commit(caller) + + assert.Error(t, err) +} + +func source(err error) *MockSource { + src := new(MockSource) + src.On("Commit", mock.Anything).Return(err) + + return src +} + +func committer(err error) *MockCommitter { + c := new(MockCommitter) + c.On("Commit").Return(err) + + return c +} + +func node(p streams.Processor) *MockNode { + n := new(MockNode) + n.On("Processor").Return(p) + + return n +} + +func pump() *MockPump { + p := new(MockPump) + p.On("Lock").Return() + p.On("Unlock").Return() + + return p +} + +func metadata() *MockMetadata { + meta := new(MockMetadata) + meta.On("Merge", mock.Anything).Return(meta) + meta.On("Update", mock.Anything).Return(meta) + + return meta +} diff --git a/task.go b/task.go index e3c17c8..2aaf5ec 100644 --- a/task.go +++ b/task.go @@ -2,11 +2,22 @@ package streams import ( "errors" + "time" ) // ErrorFunc represents a streams error handling function. type ErrorFunc func(error) +// TaskOptFunc represents a function that sets up the Task. +type TaskOptFunc func(t *streamTask) + +// WithCommitInterval defines an interval of automatic commits. +func WithCommitInterval(d time.Duration) TaskOptFunc { + return func(t *streamTask) { + t.supervisor = NewTimedSupervisor(t.supervisor, d, t.errorFn) + } +} + // Task represents a streams task. type Task interface { // Start starts the streams processors. @@ -23,43 +34,57 @@ type streamTask struct { running bool errorFn ErrorFunc - srcPumps SourcePumps - pumps map[Node]Pump + store Metastore + supervisor Supervisor + srcPumps SourcePumps + pumps map[Node]Pump } // NewTask creates a new streams task. 
-func NewTask(topology *Topology) Task { - return &streamTask{ - topology: topology, - srcPumps: SourcePumps{}, - pumps: map[Node]Pump{}, +func NewTask(topology *Topology, opts ...TaskOptFunc) Task { + store := NewMetastore() + + t := &streamTask{ + topology: topology, + store: store, + supervisor: NewSupervisor(store), + srcPumps: SourcePumps{}, + pumps: map[Node]Pump{}, } + + for _, optFn := range opts { + optFn(t) + } + + return t } // Start starts the streams processors. func (t *streamTask) Start() error { // If we are already running, exit if t.running { - return errors.New("streams: task already started") + return errors.New("streams: task already running") } t.running = true t.setupTopology() - return nil + return t.supervisor.Start() } func (t *streamTask) setupTopology() { nodes := flattenNodeTree(t.topology.Sources()) reverseNodes(nodes) for _, node := range nodes { - pipe := NewPipe(t.resolvePumps(node.Children())) + pipe := NewPipe(t.store, t.supervisor, node.Processor(), t.resolvePumps(node.Children())) node.Processor().WithPipe(pipe) pump := NewPump(node, pipe.(TimedPipe), t.handleError) t.pumps[node] = pump } + t.supervisor.WithPumps(t.pumps) + for source, node := range t.topology.Sources() { srcPump := NewSourcePump(node.Name(), source, t.resolvePumps(node.Children()), t.handleError) t.srcPumps = append(t.srcPumps, srcPump) @@ -83,13 +108,30 @@ func (t *streamTask) Close() error { } func (t *streamTask) closeTopology() error { + // Stop the pumps nodes := flattenNodeTree(t.topology.Sources()) + for _, node := range nodes { + t.pumps[node].Stop() + } + + // Commit any outstanding batches and metadata + if err := t.supervisor.Commit(nil); err != nil { + return err + } + + // Close the supervisor + if err := t.supervisor.Close(); err != nil { + return err + } + + // Close the pumps for _, node := range nodes { if err := t.pumps[node].Close(); err != nil { return err } } + // Close the sources for _, srcPump := range t.srcPumps { if err := 
srcPump.Close(); err != nil { return err diff --git a/task_internal_test.go b/task_internal_test.go new file mode 100644 index 0000000..6083809 --- /dev/null +++ b/task_internal_test.go @@ -0,0 +1,70 @@ +package streams + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestWithCommitInterval(t *testing.T) { + task := NewTask(&Topology{sources: map[Source]Node{}}, WithCommitInterval(1)) + + assert.IsType(t, &timedSupervisor{}, task.(*streamTask).supervisor) +} + +func TestStreamTask_StartSupervisorStartError(t *testing.T) { + task := &streamTask{ + topology: &Topology{sources: map[Source]Node{}}, + supervisor: &fakeSupervisor{StartErr: errors.New("start error")}, + } + + err := task.Start() + + assert.Error(t, err) + assert.Equal(t, "start error", err.Error()) +} + +func TestStreamTask_CloseSupervisorCommitError(t *testing.T) { + task := &streamTask{ + topology: &Topology{sources: map[Source]Node{}}, + supervisor: &fakeSupervisor{CommitError: errors.New("commit error")}, + } + + err := task.Close() + + assert.Error(t, err) + assert.Equal(t, "commit error", err.Error()) +} + +func TestStreamTask_CloseSupervisorCloseError(t *testing.T) { + task := &streamTask{ + topology: &Topology{sources: map[Source]Node{}}, + supervisor: &fakeSupervisor{CloseErr: errors.New("close error")}, + } + + err := task.Close() + + assert.Error(t, err) + assert.Equal(t, "close error", err.Error()) +} + +type fakeSupervisor struct { + StartErr error + CommitError error + CloseErr error +} + +func (s *fakeSupervisor) Close() error { + return s.CloseErr +} + +func (s *fakeSupervisor) WithPumps(pumps map[Node]Pump) {} + +func (s *fakeSupervisor) Start() error { + return s.StartErr +} + +func (s *fakeSupervisor) Commit(Processor) error { + return s.CommitError +} diff --git a/task_test.go b/task_test.go index ab83566..5674bf8 100644 --- a/task_test.go +++ b/task_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/msales/streams" + 
"github.com/msales/streams/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -31,10 +31,11 @@ func TestStreamTask_ConsumesMessages(t *testing.T) { b := streams.NewStreamBuilder() b.Source("src", &chanSource{msgs: msgs}). - Map("pass-through", passThroughMapper). + Map("pass-through", streams.MapperFunc(passThroughMapper)). Process("processor", p) - task := streams.NewTask(b.Build()) + tp, _ := b.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { t.FailNow() }) @@ -48,7 +49,7 @@ func TestStreamTask_ConsumesMessages(t *testing.T) { time.Sleep(time.Millisecond) - task.Close() + _ = task.Close() p.AssertExpectations(t) } @@ -61,13 +62,14 @@ func TestStreamTask_Throughput(t *testing.T) { b := streams.NewStreamBuilder() b.Source("src", &chanSource{msgs: msgs}). - Map("pass-through", passThroughMapper). - Map("count", func(msg *streams.Message) (*streams.Message, error) { + Map("pass-through", streams.MapperFunc(passThroughMapper)). + Map("count", streams.MapperFunc(func(msg *streams.Message) (*streams.Message, error) { count++ return msg, nil - }) + })) - task := streams.NewTask(b.Build()) + tp, _ := b.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { t.FailNow() }) @@ -83,7 +85,7 @@ func TestStreamTask_Throughput(t *testing.T) { time.Sleep(time.Millisecond) - task.Close() + _ = task.Close() assert.Equal(t, 100, count) } @@ -94,16 +96,17 @@ func TestStreamTask_CannotStartTwice(t *testing.T) { b := streams.NewStreamBuilder() b.Source("src", &chanSource{msgs: msgs}) - task := streams.NewTask(b.Build()) + tp, _ := b.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { t.FailNow() }) - task.Start() + _ = task.Start() err := task.Start() - task.Close() + _ = task.Close() assert.Error(t, err) } @@ -118,16 +121,17 @@ func TestStreamTask_HandleSourceError(t *testing.T) { b := streams.NewStreamBuilder() b.Source("src", s) - task := streams.NewTask(b.Build()) + tp, _ := b.Build() + task := 
streams.NewTask(tp) task.OnError(func(err error) { gotError = true }) - task.Start() + _ = task.Start() time.Sleep(time.Millisecond) - task.Close() + _ = task.Close() assert.True(t, gotError) } @@ -147,18 +151,19 @@ func TestStreamTask_HandleProcessorError(t *testing.T) { b.Source("src", &chanSource{msgs: msgs}). Process("processor", p) - task := streams.NewTask(b.Build()) + tp, _ := b.Build() + task := streams.NewTask(tp) task.OnError(func(err error) { gotError = true }) - task.Start() + _ = task.Start() msgs <- msg time.Sleep(time.Millisecond) - task.Close() + _ = task.Close() assert.True(t, gotError) } @@ -176,8 +181,9 @@ func TestStreamTask_HandleCloseWithProcessorError(t *testing.T) { b.Source("src", s). Process("processor", p) - task := streams.NewTask(b.Build()) - task.Start() + tp, _ := b.Build() + task := streams.NewTask(tp) + _ = task.Start() time.Sleep(time.Millisecond) @@ -194,8 +200,9 @@ func TestStreamTask_HandleCloseWithSourceError(t *testing.T) { b := streams.NewStreamBuilder() b.Source("src", s) - task := streams.NewTask(b.Build()) - task.Start() + tp, _ := b.Build() + task := streams.NewTask(tp) + _ = task.Start() time.Sleep(time.Millisecond) diff --git a/topology.go b/topology.go index d294b7b..bbf4cd8 100644 --- a/topology.go +++ b/topology.go @@ -1,5 +1,7 @@ package streams +import "errors" + // Node represents a topology node. type Node interface { // Name gets the node name. @@ -103,15 +105,25 @@ func (t Topology) Processors() []Node { return t.processors } +// topologyTest represents a test that should be performed on the topology. +type topologyTest func(map[Source]Node, []Node) error + // TopologyBuilder represents a topology builder. type TopologyBuilder struct { + tests []topologyTest sources map[Source]Node processors []Node } // NewTopologyBuilder creates a new TopologyBuilder. 
func NewTopologyBuilder() *TopologyBuilder { + tests := []topologyTest{ + sourcesConnectedTest, + committersConnectedTests, + } + return &TopologyBuilder{ + tests: tests, sources: map[Source]Node{}, processors: []Node{}, } @@ -139,11 +151,87 @@ func (tb *TopologyBuilder) AddProcessor(name string, processor Processor, parent } // Build creates an immutable Topology. -func (tb *TopologyBuilder) Build() *Topology { +func (tb *TopologyBuilder) Build() (*Topology, []error) { + var errs []error + for _, test := range tb.tests { + if err := test(tb.sources, tb.processors); err != nil { + errs = append(errs, err) + } + } + return &Topology{ sources: tb.sources, processors: tb.processors, + }, errs +} + +func sourcesConnectedTest(srcs map[Source]Node, _ []Node) error { + nodes := make([]Node, 0, len(srcs)) + for _, node := range srcs { + nodes = append(nodes, node) + } + + if len(nodes) <= 1 { + return nil + } + + if !nodesConnected(nodes) { + return errors.New("streams: not all sources are connected") + } + + return nil +} + +func committersConnectedTests(_ map[Source]Node, procs []Node) error { + var nodes []Node + for _, node := range procs { + if _, ok := node.Processor().(Committer); !ok { + continue + } + + nodes = append(nodes, node) + } + + if len(nodes) <= 1 { + return nil + } + + if nodesConnected(nodes) { + return errors.New("streams: committers are inline") } + + return nil +} + +func nodesConnected(roots []Node) bool { + if len(roots) <= 1 { + return true + } + + var nodes []Node + var visit []Node + connections := 0 + + for _, node := range roots { + visit = append(visit, node) + } + + for len(visit) > 0 { + var n Node + n, visit = visit[0], visit[1:] + nodes = append(nodes, n) + + for _, c := range n.Children() { + if contains(c, visit) || contains(c, nodes) { + connections++ + continue + } + + visit = append(visit, c) + } + } + + return connections == len(roots)-1 } func flattenNodeTree(roots map[Source]Node) []Node { @@ -163,7 +251,7 @@ func 
flattenNodeTree(roots map[Source]Node) []Node { } for _, c := range n.Children() { - if contains(c, visit) { + if contains(c, visit) || contains(c, nodes) { continue } diff --git a/topology_internal_test.go b/topology_internal_test.go index d5da67a..1ca4232 100644 --- a/topology_internal_test.go +++ b/topology_internal_test.go @@ -6,6 +6,125 @@ import ( "github.com/stretchr/testify/assert" ) +func TestSourcesConnectedTest(t *testing.T) { + node5 := &testNode{ + children: []Node{}, + } + node1 := &testNode{ + children: []Node{node5}, + } + node2 := &testNode{ + children: []Node{node5}, + } + + err := sourcesConnectedTest(map[Source]Node{ + testSource(1): node1, + testSource(2): node2, + }, []Node{}) + + assert.NoError(t, err) +} + +func TestSourcesConnectedTest_Error(t *testing.T) { + node1 := &testNode{ + children: []Node{}, + } + node2 := &testNode{ + children: []Node{}, + } + + err := sourcesConnectedTest(map[Source]Node{ + testSource(1): node1, + testSource(2): node2, + }, []Node{}) + + assert.Error(t, err) +} + +func TestCommittersConnectedTests(t *testing.T) { + node1 := &testNode{ + children: []Node{}, + processor: &testCommitter{}, + } + node2 := &testNode{ + children: []Node{}, + processor: &testCommitter{}, + } + + err := committersConnectedTests(map[Source]Node{}, []Node{node1, node2}) + + assert.NoError(t, err) +} + +func TestCommittersConnectedTests_Error(t *testing.T) { + node3 := &testNode{ + children: []Node{}, + processor: &testCommitter{}, + } + node2 := &testNode{ + children: []Node{node3}, + } + node1 := &testNode{ + children: []Node{node2}, + processor: &testCommitter{}, + } + + err := committersConnectedTests(map[Source]Node{}, []Node{node1, node2, node3}) + + assert.Error(t, err) +} + +func TestNodesConnected_Connected(t *testing.T) { + node5 := &testNode{ + children: []Node{}, + } + node3 := &testNode{ + children: []Node{node5}, + } + node1 := &testNode{ + children: []Node{node3}, + } + node4 := &testNode{ + children: []Node{node5}, + } + 
node2 := &testNode{ + children: []Node{node4}, + } + + connected := nodesConnected([]Node{node1, node2}) + + assert.True(t, connected) +} + +func TestNodesConnected_NotConnected(t *testing.T) { + node3 := &testNode{ + children: []Node{}, + } + node1 := &testNode{ + children: []Node{node3}, + } + node4 := &testNode{ + children: []Node{}, + } + node2 := &testNode{ + children: []Node{node4}, + } + + connected := nodesConnected([]Node{node1, node2}) + + assert.False(t, connected) +} + +func TestNodesConnected_OneNode(t *testing.T) { + node := &testNode{ + children: []Node{}, + } + + connected := nodesConnected([]Node{node}) + + assert.True(t, connected) +} + func TestFlattenNodeTree(t *testing.T) { node7 := &testNode{processor: &testProcessor{}} node6 := &testNode{processor: &testProcessor{}} @@ -125,3 +244,19 @@ func (p testProcessor) Process(msg *Message) error { func (p testProcessor) Close() error { return nil } + +type testCommitter struct{} + +func (p testCommitter) WithPipe(Pipe) {} + +func (p testCommitter) Process(msg *Message) error { + return nil +} + +func (p testCommitter) Commit() error { + return nil +} + +func (p testCommitter) Close() error { + return nil +} diff --git a/topology_test.go b/topology_test.go index 5bba9dc..6b7fdf5 100644 --- a/topology_test.go +++ b/topology_test.go @@ -3,7 +3,7 @@ package streams_test import ( "testing" - "github.com/msales/streams" + "github.com/msales/streams/v2" "github.com/stretchr/testify/assert" ) @@ -84,8 +84,9 @@ func TestTopologyBuilder_AddSource(t *testing.T) { tb := streams.NewTopologyBuilder() n := tb.AddSource("test", s) - to := tb.Build() + to, errs := tb.Build() + assert.Len(t, errs, 0) assert.IsType(t, &streams.SourceNode{}, n) assert.Equal(t, "test", n.(*streams.SourceNode).Name()) assert.Len(t, to.Sources(), 1) @@ -98,8 +99,9 @@ func TestTopologyBuilder_AddProcessor(t *testing.T) { tb := streams.NewTopologyBuilder() n := tb.AddProcessor("test", p, []streams.Node{pn}) - to := tb.Build() + to, errs := 
tb.Build() + assert.Len(t, errs, 0) assert.IsType(t, &streams.ProcessorNode{}, n) assert.Equal(t, "test", n.(*streams.ProcessorNode).Name()) assert.Len(t, pn.Children(), 1) @@ -108,11 +110,32 @@ func TestTopologyBuilder_AddProcessor(t *testing.T) { assert.Equal(t, n, to.Processors()[0]) } +func TestTopologyBuilder_BuildChecksConnectedSources(t *testing.T) { + tb := streams.NewTopologyBuilder() + _ = tb.AddSource("src", new(MockSource)) + _ = tb.AddSource("src", new(MockSource)) + + _, errs := tb.Build() + + assert.Len(t, errs, 1) +} + +func TestTopologyBuilder_BuildChecksConnectedCommitters(t *testing.T) { + tb := streams.NewTopologyBuilder() + n1 := tb.AddProcessor("1", new(MockCommitter), []streams.Node{}) + n2 := tb.AddProcessor("1", new(MockProcessor), []streams.Node{n1}) + _ = tb.AddProcessor("1", new(MockCommitter), []streams.Node{n2}) + + _, errs := tb.Build() + + assert.Len(t, errs, 1) +} + func TestTopology_Sources(t *testing.T) { s := new(MockSource) tb := streams.NewTopologyBuilder() sn := tb.AddSource("test", s) - to := tb.Build() + to, _ := tb.Build() sources := to.Sources() @@ -124,7 +147,7 @@ func TestTopology_Processors(t *testing.T) { p := new(MockProcessor) tb := streams.NewTopologyBuilder() pn := tb.AddProcessor("test2", p, []streams.Node{}) - to := tb.Build() + to, _ := tb.Build() processors := to.Processors()