Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Kafka with KRaft #69

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions config/kafka/kraft.properties
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Listener config
# 2 listeners configured for oauth2 (1 internal docker and 1 external), 2 listeners for no auth (1 internal docker and 1 external), 1 listener for redpanda which uses SCRAM-SHA-512 auth.
listeners=auth-listener-docker-internal://kafka:19092,auth-listener-docker-external://0.0.0.0:9092,listener-docker-internal://kafka:19093,listener-docker-external://0.0.0.0:9093,redpanda://kafka:19094,CONTROLLER://kafka:9095
advertised.listeners=auth-listener-docker-internal://kafka:19092,auth-listener-docker-external://127.0.0.1:9092,listener-docker-internal://kafka:19093,listener-docker-external://127.0.0.1:9093,redpanda://kafka:19094
inter.broker.listener.name=listener-docker-internal
listener.security.protocol.map=auth-listener-docker-internal:SASL_PLAINTEXT,auth-listener-docker-external:SASL_PLAINTEXT,listener-docker-internal:PLAINTEXT,listener-docker-external:PLAINTEXT,redpanda:SASL_PLAINTEXT,CONTROLLER:PLAINTEXT
sasl.mechanism.inter.broker.protocol=OAUTHBEARER

# Internal docker oauth2 client, which is also used for inter-broker communication.
listener.name.auth-listener-docker-internal.sasl.enabled.mechanisms=OAUTHBEARER
listener.name.auth-listener-docker-internal.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
oauth.client.id="default-access" \
oauth.client.secret="default-access-secret" \
oauth.scope="kafka" \
oauth.token.endpoint.uri="http://keycloak:1852/realms/local-development/protocol/openid-connect/token" \
oauth.jwks.endpoint.uri="http://keycloak:1852/realms/local-development/protocol/openid-connect/certs" \
oauth.check.issuer="false" \
oauth.username.claim="sub" \
unsecuredLoginStringClaim_sub="unused";
listener.name.auth-listener-docker-internal.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
listener.name.auth-listener-docker-internal.oauthbearer.sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler

# External oauth2 client
listener.name.auth-listener-docker-external.sasl.enabled.mechanisms=OAUTHBEARER
listener.name.auth-listener-docker-external.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
oauth.jwks.endpoint.uri="http://keycloak:1852/realms/local-development/protocol/openid-connect/certs" \
oauth.check.issuer="false" \
oauth.username.claim="sub" \
unsecuredLoginStringClaim_sub="unused";
listener.name.auth-listener-docker-external.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler

# Internal basic auth client for redpanda.
listener.name.redpanda.sasl.enabled.mechanisms=SCRAM-SHA-512
listener.name.redpanda.scram-sha-512.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required;

# Authorization configuration
principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
authorizer.class.name=com.trifork.cheetah.CheetahKRaftAuthorizer

# General config
broker.id=1
min.insync.replicas=1
default.replication.factor=1
transaction.state.log.replication.factor=1
offsets.topic.replication.factor=1

# Users to bypass the CheetahKRaftAuthorizer (configured via authorizer.class.name above).
# User:ANONYMOUS can be removed if you want to require authentication.
super.users=User:redpanda;User:ANONYMOUS

# Cheetah config
cheetah.authorization.claim.name=roles
cheetah.authorization.prefix=Kafka_
cheetah.authorization.claim.is-list=true

# KRaft config
process.roles=broker,controller
node.id=1
controller.quorum.voters=1@kafka:9095
controller.listener.names=CONTROLLER
cluster.id=iBzfyk2mT5qGMuO9Yu0WrQ
42 changes: 8 additions & 34 deletions docker-compose/kafka.yaml
Original file line number Diff line number Diff line change
@@ -1,58 +1,33 @@
---
services:
zookeeper:
mem_limit: 256m
container_name: zookeeper
image: quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
command:
["sh", "-c", "bin/zookeeper-server-start.sh config/zookeeper.properties"]
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
LOG_DIR: /tmp/logs
restart: unless-stopped
profiles:
- kafka-core
- schema-registry-core
- core
- kafka
- observability
- full

kafka:
cpus: 2
image: ${CHEETAH_DOCKER_REGISTRY-}cheetah-infrastructure-utils-kafka:strimzi-0.45.0-kafka-3.9.0-trifork-1.7.0-RC.2
container_name: kafka
image: ${CHEETAH_DOCKER_REGISTRY-}cheetah-infrastructure-utils-kafka:strimzi-0.43.0-kafka-3.7.1-trifork-1.7.0
hostname: kafka
mem_limit: 1024m
command:
[
"sh",
"-c",
"bin/kafka-server-start.sh /opt/kafka/custom-config/server.properties",
"bin/kafka-storage.sh format --ignore-formatted --cluster-id iBzfyk2mT5qGMuO9Yu0WrQ --config /opt/kafka/custom-config/kraft.properties && bin/kafka-server-start.sh /opt/kafka/custom-config/kraft.properties",
]
depends_on:
zookeeper:
condition: service_started
keycloak-setup:
condition: service_completed_successfully
ports:
- "9092:9092" # Requires OAuth2 authentication
- "9093:9093" # Requires No authentication
expose:
- "19092"
- "9093"
environment:
LOG_DIR: "/tmp/logs"
KAFKA_LOG4J_OPTS: "-Dlog4j.configuration=file:/opt/kafka/custom-config/log4j.properties" # enables debug logging
KAFKA_ROOT_LOGGER_LEVEL: debug
volumes:
- kafka:/var/lib/kafka # enable for persistent storage
- ./config/kafka/server.properties:/opt/kafka/custom-config/server.properties
#- ./config/kafka/log4j.properties:/opt/kafka/custom-config/log4j.properties
- ./config/kafka/log4j.cluster.properties:/opt/kafka/custom-config/log4j.properties
- ./config/kafka/kraft.properties:/opt/kafka/custom-config/kraft.properties
# - ./config/kafka/log4j.properties:/opt/kafka/custom-config/log4j.properties # enables debug logging
restart: unless-stopped
healthcheck:
test: nc -z kafka 19092 || exit -1
test: nc -z kafka 9093 || exit -1
interval: 30s # (default: 30s)
timeout: 10s # (default: 30s)
retries: 3 # (default: 3)
Expand All @@ -66,8 +41,7 @@ services:
- full

kafka-setup:
mem_limit: 512m
image: quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
image: quay.io/strimzi/kafka:0.45.0-kafka-3.9.0
container_name: kafka-setup
volumes:
- ./config/kafkaconfig.sh:/etc/config/kafkaconfig.sh
Expand Down
1 change: 0 additions & 1 deletion readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ See sections below for details on security model configuration.
The kafka setup consists of different services:

- **kafka** - Strimzi Kafka with the [Cheetah Kafka Authorizer](https://github.com/trifork/cheetah-infrastructure-utils-kafka)
- **zookeeper** - Strimzi Zookeeper
- **redpanda** - A Console provides a user interface to manage multiple Kafka connect clusters. <https://docs.redpanda.com/docs/manage/console/>
- **kafka-setup** - A bash script which sets up a Kafka User for redpanda to use when connecting to Kafka, as well as some predefined topics. The topics to be created are determined by the environment variable INITIAL_KAFKA_TOPICS, which can be set in the `.env` file or overridden in your local environment.
- **schema-registry** - [Schema registry](https://www.apicur.io/registry/docs/apicurio-registry/2.5.x/index.html)
Expand Down