-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #4 from driessamyn/open-api
Open api
- Loading branch information
Showing
16 changed files
with
330 additions
and
103 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,10 +1,20 @@ | ||
package kafkasnoop

import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.slf4j.LoggerFactory
import java.util.Properties
import java.util.UUID

/**
 * Factory for Kafka consumers configured from the supplied [props].
 *
 * Every consumer created gets a unique `client.id`, so individual consumers
 * can be told apart in broker logs and metrics.
 */
class KafkaClientFactory(private val props: Properties) {

    /**
     * Creates a new byte-array key/value consumer with a unique client id.
     *
     * NOTE(review): this mutates the shared [props] instance on each call;
     * if consumers are created concurrently the CLIENT_ID_CONFIG writes can
     * interleave — consider cloning the properties per consumer. TODO confirm.
     */
    fun createConsumer(): KafkaConsumer<ByteArray, ByteArray> {
        val consumerId = "kafkasnoop-consumer-${UUID.randomUUID()}"
        logger.info("Create consumer $consumerId")
        props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, consumerId)
        return KafkaConsumer<ByteArray, ByteArray>(props)
    }

    companion object {
        private val logger = LoggerFactory.getLogger(KafkaClientFactory::class.java)
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,11 @@ | ||
package kafkasnoop.dto

import com.papsign.ktor.openapigen.annotations.Response
import java.time.Instant

/**
 * A single Kafka message as exposed by the snoop API.
 *
 * [partition] is the string form of the TopicPartition the record came from;
 * [key] and [value] are the record's payload decoded as UTF-8 text.
 */
@Response("A Kafka Message")
data class Message(
    val offset: Long,
    val partition: String,
    val key: String,
    val value: String,
    val timestamp: Instant,
) {
    // Pipe-delimited rendering — presumably what the websocket endpoint
    // sends as a text frame; verify against callers.
    override fun toString(): String {
        return "$offset|$partition|$key|$value|$timestamp"
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,15 @@ | ||
package kafkasnoop.dto

import com.papsign.ktor.openapigen.annotations.Response

/**
 * Offset range and replica counts for one partition of a Kafka topic.
 */
@Response("Kafka Topic Partition")
data class Partition(
    val index: Int,
    val beginOffset: Long,
    val endOffset: Long,
    val inSyncReplicas: Int,
    val offlineReplicas: Int,
)

/**
 * A Kafka topic together with its partitions.
 */
@Response("Kafka Topic")
data class Topic(val name: String, val partitions: List<Partition>)
93 changes: 64 additions & 29 deletions
93
http/src/main/kotlin/kafkasnoop/routes/MessageProcessor.kt
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,50 +1,85 @@ | ||
package kafkasnoop.routes

import kafkasnoop.KafkaClientFactory
import kafkasnoop.dto.Message
import org.apache.kafka.common.TopicPartition
import java.time.Duration
import java.time.Instant
import kotlin.math.max

/**
 * Reads messages from the partitions of a single topic.
 *
 * NOTE: changed to use one consumer per partition. According to the docs,
 * assigning multiple partitions to a consumer without a consumer group should
 * be fine, but it appeared unreliable. This probably means something was wrong
 * in my code, but will come back to this.
 */
class MessageProcessor(
    private val kafkaClientFactory: KafkaClientFactory,
    private val topicName: String,
) : AutoCloseable {
    // All partitions of [topicName], resolved once at construction time.
    val partitions: List<TopicPartition>

    // Set by close(); checked by the polling loop so in-flight sequences stop.
    @Volatile
    private var isClosed = false

    init {
        logger.debug("Getting messages for $topicName")
        // Short-lived consumer used only to discover the topic's partitions.
        kafkaClientFactory.createConsumer().use { consumer ->
            partitions = consumer.partitionsFor(topicName).map {
                TopicPartition(it.topic(), it.partition())
            }
        }
    }

    /**
     * Lazily yields messages from [partition], oldest first per poll batch.
     *
     * @param maxMsgCount cap on messages to load; Int.MAX_VALUE means "tail forever".
     * @param minOffset lowest offset to start reading from.
     */
    fun startProcess(partition: TopicPartition, maxMsgCount: Int = Int.MAX_VALUE, minOffset: Long = 0L) =
        sequence {
            kafkaClientFactory.createConsumer().use { kafkaConsumer ->
                kafkaConsumer.assign(listOf(partition))
                val beginningOffsets = kafkaConsumer.beginningOffsets(partitions)
                val endOffsets = kafkaConsumer.endOffsets(partitions)

                // default to rewinding to 5 or max msg count
                val offsetDiff = if (maxMsgCount == Int.MAX_VALUE) 5 else maxMsgCount
                logger.debug("Min offset for partition $partition is ${beginningOffsets[partition]}")
                logger.debug("Max offset for partition $partition is ${endOffsets[partition]}")
                val startOffset = max(endOffsets[partition]?.minus(offsetDiff) ?: 0L, 0L)
                val offset = max(startOffset, minOffset)
                // NOTE(review): max() here means messageCount is at least maxMsgCount even
                // when fewer messages are available — min() may have been intended; confirm.
                val messageCount = max((endOffsets[partition] ?: 0L) - offset, maxMsgCount.toLong())
                logger.info("Loading $messageCount from $partition starting at $offset")
                kafkaConsumer.seek(partition, offset)

                var messagesLoaded = 0
                var emptyPolls = 0
                // TODO: tidy-up this logic.
                while (!isClosed && (maxMsgCount == Int.MAX_VALUE || emptyPolls <= 5) && messagesLoaded < messageCount) {
                    logger.debug("Polling $partition from ${kafkaConsumer.position(partition)}")
                    val msgs = kafkaConsumer
                        .poll(Duration.ofMillis(200)).records(partition)
                        .map { record ->
                            logger.debug("Found message $partition: ${record.offset()}")
                            val key = String(record.key(), Charsets.UTF_8)
                            val value = String(record.value(), Charsets.UTF_8)
                            Message(record.offset(), partition.toString(), key, value, Instant.ofEpochMilli(record.timestamp()))
                        }

                    if (msgs.isEmpty()) {
                        emptyPolls += 1
                        logger.debug("Empty polls: $emptyPolls")
                        Thread.sleep(200)
                    } else {
                        val sortedMsgs = msgs.sortedBy { it.offset }
                        logger.debug("Found $sortedMsgs on $partition")
                        yieldAll(sortedMsgs)
                        emptyPolls = 0
                    }
                    messagesLoaded += msgs.count()
                    logger.debug("Loaded $messagesLoaded out of $messageCount")
                }
                logger.debug("stopping to process")
            }
        }

    /** Signals any in-flight [startProcess] sequences to stop polling. */
    override fun close() {
        isClosed = true
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,45 +1,51 @@ | ||
package kafkasnoop.routes

import com.papsign.ktor.openapigen.annotations.parameters.PathParam
import com.papsign.ktor.openapigen.annotations.parameters.QueryParam
import com.papsign.ktor.openapigen.route.info
import com.papsign.ktor.openapigen.route.path.normal.NormalOpenAPIRoute
import com.papsign.ktor.openapigen.route.path.normal.get
import com.papsign.ktor.openapigen.route.response.respond
import kafkasnoop.KafkaClientFactory
import kafkasnoop.dto.Message
import java.time.Instant

private const val MAX_MESSAGES_DEFAULT = 10
private const val MIN_OFFSET_DEFAULT = 0L

/**
 * Parameters for the "get messages from a topic" endpoint.
 */
data class GetTopicMessagesParams(
    @PathParam("Name of the topic")
    val topic: String,
    @QueryParam("Partition filter (Optional)")
    val partition: Int?,
    @QueryParam("Maximum number of messages to return per partition - Optional, default: $MAX_MESSAGES_DEFAULT")
    val max: Int?,
    @QueryParam("Minimum offset to start returning messages from - Optional, default: $MIN_OFFSET_DEFAULT")
    val minOffset: Long?
)

/**
 * Registers GET /{topic}: returns messages from the topic, optionally filtered
 * to one partition, at most `max` per partition, newest first.
 */
fun NormalOpenAPIRoute.messages(kafkaClientFactory: KafkaClientFactory) {
    get<GetTopicMessagesParams, List<Message>>(
        info("Messages", "Get Messages from given topic"),
        example = listOf(
            Message(
                0,
                // fixed typo: was "topic-parition"
                "topic-partition",
                "message-key",
                "message-value", Instant.now()
            )
        )
    ) { params ->
        MessageProcessor(kafkaClientFactory, params.topic).use { processor ->
            val msgs = processor.partitions
                .filter { null == params.partition || it.partition() == params.partition }
                .map { p ->
                    processor.startProcess(
                        p,
                        params.max ?: MAX_MESSAGES_DEFAULT,
                        params.minOffset ?: MIN_OFFSET_DEFAULT
                    ).toList().sortedBy { it.offset }
                }.flatten().sortedByDescending { it.timestamp }
            respond(msgs)
        }
    }
}
Oops, something went wrong.