Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Matcher v2 init #541

Merged
merged 2 commits into from
Sep 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions internal/sink/matcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@ type MatcherV1 struct {
CommonWebhook
}

// MatcherV2 filters events for a v2 webhook registration using a set of
// per-field regular expressions, sharing the mutex/logger bookkeeping
// embedded via CommonWebhook.
type MatcherV2 struct {
	// matcher maps a registration field name to its compiled regex;
	// nil means an empty matcher list was registered (see update).
	matcher map[string]*regexp.Regexp
	CommonWebhook
}
type CommonWebhook struct {
mutex sync.RWMutex
logger *zap.Logger
Expand All @@ -58,6 +62,13 @@ func NewMatcher(l ancla.Register, logger *zap.Logger) (Matcher, error) {
return nil, err
}
return m, nil
case *ancla.RegistryV2:
m := &MatcherV2{}
m.logger = logger
if err := m.update(*v); err != nil {
return nil, err
}
return m, nil
default:
return nil, fmt.Errorf("invalid listener")
}
Expand Down Expand Up @@ -194,3 +205,76 @@ func (m1 *MatcherV1) getUrls() (urls *ring.Ring) {
m1.urls = m1.urls.Next()
return
}

// Update applies user configurable values for the outbound sender when a
// webhook is registered
func (m2 *MatcherV2) update(l ancla.RegistryV2) error {

//TODO: don't believe the logger for webhook is being set anywhere just yet
m2.logger = m2.logger.With(zap.String("webhook.address", l.Registration.Address))

if l.Registration.FailureURL != "" {
_, err := url.ParseRequestURI(l.Registration.FailureURL)
if err != nil {
return err
}
}

//TODO: should we be checking that the l.Registration.Matcher.Field is a field in the wrp.Message?
matcher := make(map[string]*regexp.Regexp)
for _, item := range l.Registration.Matcher {
if item.Regex == ".*" {
// Match everything - skip the filtering
matcher[item.Field] = &regexp.Regexp{}
break
}

var re *regexp.Regexp
re, err := regexp.Compile(item.Regex)
if err != nil {
return fmt.Errorf("invalid matcher item: '%s'", item.Regex)
}
matcher[item.Field] = re
}

// write/update sink sender
m2.mutex.Lock()
defer m2.mutex.Unlock()

// if matcher list is empty set it nil for Queue() logic
if len(matcher) == 0 {
m2.matcher = nil
} else {
m2.matcher = matcher
}

return nil

}

// IsMatch reports whether msg passes this matcher's regex filters.
// A nil/empty matcher map makes the loop run zero times, so IsMatch
// returns false in that case.
func (m2 *MatcherV2) IsMatch(msg *wrp.Message) bool {
	// Snapshot the map under the read lock; update() replaces the map
	// wholesale rather than mutating it in place.
	m2.mutex.RLock()
	matcher := m2.matcher
	m2.mutex.RUnlock()

	var (
		matchDevice = false
	)
	// NOTE(review): each regex is applied to `field` — the map KEY from the
	// registration — not to any value taken from msg. That makes the result
	// independent of msg's contents, which looks unintentional; presumably
	// the regex should run against the corresponding wrp.Message field
	// (e.g. msg.Source, per the debug log below). TODO: confirm against
	// MatcherV1's behavior.
	for field, deviceRegex := range matcher {
		if deviceRegex.MatchString(field) || deviceRegex.MatchString(strings.TrimPrefix(field, "event:")) {
			matchDevice = true
			break
		}
	}

	if !matchDevice {
		m2.logger.Debug("device regex doesn't match", zap.String("event.source", msg.Source))
		return false
	}
	return true
}

// TODO: this is a big reason why I want to refactor the Matcher logic
func (m2 *MatcherV2) getUrls() (urls *ring.Ring) {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't believe MatcherV2 is going to need getUrls - in my refactored branch i move the urls to the matcher to get rid of this function which is also called here:

urls = s.matcher.getUrls()
and why I originally had getUrls a part of the interface

return
}
19 changes: 10 additions & 9 deletions internal/sink/sink.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,9 @@ type Kafka struct {
}

func NewSink(c Config, logger *zap.Logger, listener ancla.Register) Sink {
var sink Sink
switch l := listener.(type) {
case *ancla.RegistryV1:
sink = &WebhookV1{
sink := &WebhookV1{
id: l.GetId(),
deliveryInterval: c.DeliveryInterval,
deliveryRetries: c.DeliveryRetries,
Expand All @@ -68,31 +67,32 @@ func NewSink(c Config, logger *zap.Logger, listener ancla.Register) Sink {
kafka := &Kafka{
id: l.Registration.CanonicalName,
brokerAddr: k.BootstrapServers,
topic: "test",
topic: "quickstart-events",
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is based on the Kafka docs for testing locally... I imagine we will be including the topic as part of the Kafka webhook struct that users will be sending us?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yup

logger: logger,
}

config := sarama.NewConfig()
//TODO: this is basic set up for now - will need to add more options to config
//once we know what we are allowing users to send
kafka.config.Producer.Return.Successes = true
kafka.config.Producer.RequiredAcks = sarama.WaitForAll
kafka.config.Producer.Retry.Max = c.DeliveryRetries //should we be using retryhint for this?

config.Producer.Return.Successes = true
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I added back in the default config because it kept failing due to certain fields not being set. I'm setting these fields this way based on a tutorial I found: https://youtu.be/j6bqJKxb2w0?si=-htt-I0kgF8RqHMy&t=1384

config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Retry.Max = c.DeliveryRetries //should we be using retryhint for this?
kafka.config = config
// Create a new Kafka producer
producer, err := sarama.NewSyncProducer(kafka.brokerAddr, config)
if err != nil {
kafka.logger.Error("Could not create Kafka producer", zap.Error(err))
return nil
}
defer producer.Close()
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.


kafka.producer = producer
sink = append(sink, kafka)
}

return sink
default:
return nil
}
return sink
}

func (v1 *WebhookV1) Update(l ancla.Register) (err error) {
Expand Down Expand Up @@ -346,6 +346,7 @@ func (k *Kafka) send(secret string, acceptType string, msg *wrp.Message) error {

// Send the message to Kafka
partition, offset, err := k.producer.SendMessage(kafkaMsg)
defer k.producer.Close()
if err != nil {
k.logger.Error("Failed to send message to Kafka", zap.Error(err))
return err
Expand Down