-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathkafka_consumer_group.go
83 lines (72 loc) · 2.41 KB
/
kafka_consumer_group.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
package main
import (
	"encoding/json"
	"fmt"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"

	"github.com/Shopify/sarama"
	log "github.com/Sirupsen/logrus"
	"github.com/wvanbergen/kafka/consumergroup"
)
const (
OFFSETS_PROCESSING_TIMEOUT_SECONDS = 10
OFFSETS_COMMIT_INTERVAL = 10
ZKRoot = ""
GroupName = ""
ProducerTopic = "topic"
)
var KafkaBrokerList = "127.0.0.1"
var ZKAddrs = []string{"127.0.0.1"}
var ConsumerFromTopics = []string{"topic"}
func main() {
//init consumer
config := consumergroup.NewConfig()
config.Offsets.Initial = sarama.OffsetOldest
config.Offsets.ProcessingTimeout = OFFSETS_PROCESSING_TIMEOUT_SECONDS
config.Offsets.CommitInterval = OFFSETS_COMMIT_INTERVAL
config.Zookeeper.Chroot = ZKRoot
cg, err := consumergroup.JoinConsumerGroup(GroupName, ConsumerFromTopics, ZKAddrs, config)
if err != nil {
return err
}
//deal consumer error
go func() {
for err := range cg.Errors() {
log.Println("consumer error(%v)", err)
}
}()
//init producer
producerConfig := sarama.NewConfig()
producerConfig.Producer.Partitioner = sarama.NewHashPartitioner
producer, err := sarama.NewSyncProducer(strings.Split(KafkaBrokerList, ","), producerConfig)
if err != nil {
log.Fatalln(err)
}
//convert
go func() {
for msg := range cg.Messages() {
//todo 协程池消费
log.Infof("topic:%s\n", (string)(msg.Topic))
log.Infof("key:%s\n", (string)(msg.Key))
log.Debugf("value:%s\n", (string)(msg.Value))
log.Infof("partition:%d\n", msg.Partition)
log.Infof("Msg.offset:%d\n", msg.Offset)
msgByte, _ := json.Marshal(msg)
//produce
kafkaMsg := &sarama.ProducerMessage{Topic: "test-topic", Key: sarama.StringEncoder("key"), Value: sarama.StringEncoder(msgByte)}
partition, offset, err := producer.SendMessage(kafkaMsg)
var info string
if err != nil {
info = fmt.Sprintf("FAILED to send topic %s message: %s", ProducerTopic, string(msgByte), err)
} else {
info = fmt.Sprintf("message sent to topic %s partition %d at offset %d", ProducerTopic, partition, offset, string(msgByte))
}
log.Infof(info)
//Zookeeper only keeps track of the latest offset that has been consumed, and assumes that everything before that offset has been handled.
//It doesn't keep track of every individual message. //notice offset and retrive message from patition
//ack
cg.CommitUpto(msg)
cg.FlushOffsets()
}
}()
return nil
}