diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index fed1e5d8766..7cd96be44b5 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -57,6 +57,7 @@ body:
- QuestDB
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/ISSUE_TEMPLATE/enhancement.yaml b/.github/ISSUE_TEMPLATE/enhancement.yaml
index c89fc29208c..d253d6bd40b 100644
--- a/.github/ISSUE_TEMPLATE/enhancement.yaml
+++ b/.github/ISSUE_TEMPLATE/enhancement.yaml
@@ -57,6 +57,7 @@ body:
- QuestDB
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml
index aa9bf4e7777..8ab49adc80b 100644
--- a/.github/ISSUE_TEMPLATE/feature.yaml
+++ b/.github/ISSUE_TEMPLATE/feature.yaml
@@ -57,6 +57,7 @@ body:
- Pulsar
- RabbitMQ
- Redpanda
+ - ScyllaDB
- Selenium
- Solace
- Solr
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 17dd0e2aa05..d4191bba9cf 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -310,6 +310,11 @@ updates:
schedule:
interval: "weekly"
open-pull-requests-limit: 10
+ - package-ecosystem: "gradle"
+ directory: "/modules/scylladb"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 10
- package-ecosystem: "gradle"
directory: "/modules/selenium"
schedule:
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 537f40a944b..f5f820afff9 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -200,6 +200,10 @@
- changed-files:
- any-glob-to-any-file:
- modules/redpanda/**/*
+"modules/scylladb":
+ - changed-files:
+ - any-glob-to-any-file:
+ - modules/scylladb/**/*
"modules/selenium":
- changed-files:
- any-glob-to-any-file:
diff --git a/docs/modules/databases/scylladb.md b/docs/modules/databases/scylladb.md
new file mode 100644
index 00000000000..b58e62264f9
--- /dev/null
+++ b/docs/modules/databases/scylladb.md
@@ -0,0 +1,62 @@
+# ScyllaDB
+
+Testcontainers module for [ScyllaDB](https://hub.docker.com/r/scylladb/scylla)
+
+## ScyllaDB's usage examples
+
+You can start a ScyllaDB container instance from any Java application by using:
+
+<!--codeinclude-->
+[Create container](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:container
+<!--/codeinclude-->
+
+<!--codeinclude-->
+[Custom config file](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:custom_configuration
+<!--/codeinclude-->
+
+### Building CqlSession
+
+<!--codeinclude-->
+[Using CQL port](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:session
+<!--/codeinclude-->
+
+<!--codeinclude-->
+[Using SSL](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:sslContext
+<!--/codeinclude-->
+
+<!--codeinclude-->
+[Using SSL with cqlsh](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:sslCqlsh
+<!--/codeinclude-->
+
+<!--codeinclude-->
+[Using Shard Awareness port](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:shardAwarenessSession
+<!--/codeinclude-->
+
+### Alternator
+
+<!--codeinclude-->
+[Enabling Alternator](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:alternator
+<!--/codeinclude-->
+
+<!--codeinclude-->
+[DynamoDbClient with Alternator](../../../modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java) inside_block:dynamodDbClient
+<!--/codeinclude-->
+
+## Adding this module to your project dependencies
+
+Add the following dependency to your `pom.xml`/`build.gradle` file:
+
+=== "Gradle"
+ ```groovy
+ testImplementation "org.testcontainers:scylladb:{{latest_version}}"
+ ```
+
+=== "Maven"
+ ```xml
+    <dependency>
+        <groupId>org.testcontainers</groupId>
+        <artifactId>scylladb</artifactId>
+        <version>{{latest_version}}</version>
+        <scope>test</scope>
+    </dependency>
+ ```
diff --git a/mkdocs.yml b/mkdocs.yml
index c308128969b..946b0053f58 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -72,6 +72,7 @@ nav:
- modules/databases/postgres.md
- modules/databases/presto.md
- modules/databases/questdb.md
+ - modules/databases/scylladb.md
- modules/databases/tidb.md
- modules/databases/timeplus.md
- modules/databases/trino.md
diff --git a/modules/scylladb/build.gradle b/modules/scylladb/build.gradle
new file mode 100644
index 00000000000..069a79cf36a
--- /dev/null
+++ b/modules/scylladb/build.gradle
@@ -0,0 +1,9 @@
+description = "Testcontainers :: ScyllaDB"
+
+dependencies {
+ api project(":database-commons")
+ api "com.scylladb:java-driver-core:4.15.0.0"
+
+ testImplementation 'org.assertj:assertj-core:3.24.2'
+ testImplementation 'software.amazon.awssdk:dynamodb:2.28.6'
+}
diff --git a/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java b/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java
new file mode 100644
index 00000000000..f255eda2717
--- /dev/null
+++ b/modules/scylladb/src/main/java/org/testcontainers/scylladb/ScyllaDBContainer.java
@@ -0,0 +1,101 @@
+package org.testcontainers.scylladb;
+
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.utility.DockerImageName;
+import org.testcontainers.utility.MountableFile;
+
+import java.net.InetSocketAddress;
+import java.util.Optional;
+
+/**
+ * Testcontainers implementation for ScyllaDB.
+ *
+ * Supported image: {@code scylladb/scylla}
+ *
+ * Exposed ports:
+ *
+ * - CQL Port: 9042
+ * - Shard Aware Port: 19042
+ * - Alternator Port: 8000
+ *
+ */
+public class ScyllaDBContainer extends GenericContainer<ScyllaDBContainer> {
+
+    private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("scylladb/scylla");
+
+    private static final Integer CQL_PORT = 9042;
+
+    private static final Integer SHARD_AWARE_PORT = 19042;
+
+    private static final Integer ALTERNATOR_PORT = 8000;
+
+    private static final String COMMAND = "--developer-mode=1 --overprovisioned=1";
+
+    private static final String CONTAINER_CONFIG_LOCATION = "/etc/scylla";
+
+    private boolean alternatorEnabled = false;
+
+    private String configLocation;
+
+    public ScyllaDBContainer(String dockerImageName) {
+        this(DockerImageName.parse(dockerImageName));
+    }
+
+    public ScyllaDBContainer(DockerImageName dockerImageName) {
+        super(dockerImageName);
+        dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME);
+
+        withExposedPorts(CQL_PORT, SHARD_AWARE_PORT);
+
+        withCommand(COMMAND);
+        waitingFor(Wait.forLogMessage(".*initialization completed..*", 1));
+    }
+
+    @Override
+    protected void configure() {
+        if (this.alternatorEnabled) {
+            addExposedPort(ALTERNATOR_PORT);
+            String newCommand =
+                COMMAND + " --alternator-port=" + ALTERNATOR_PORT + " --alternator-write-isolation=always";
+            withCommand(newCommand);
+        }
+
+        // Map (effectively replace) directory in Docker with the content of resourceLocation if resource location is
+        // not null.
+        Optional
+            .ofNullable(configLocation)
+            .map(MountableFile::forClasspathResource)
+            .ifPresent(mountableFile -> withCopyFileToContainer(mountableFile, CONTAINER_CONFIG_LOCATION));
+    }
+
+    public ScyllaDBContainer withConfigurationOverride(String configLocation) {
+        this.configLocation = configLocation;
+        return this;
+    }
+
+    public ScyllaDBContainer withAlternator() {
+        this.alternatorEnabled = true;
+        return this;
+    }
+
+    /**
+     * Retrieve an {@link InetSocketAddress} for connecting to the ScyllaDB container via the driver.
+     *
+     * @return A InetSocketAddress representation of this ScyllaDB container's host and port.
+     */
+    public InetSocketAddress getContactPoint() {
+        return new InetSocketAddress(getHost(), getMappedPort(CQL_PORT));
+    }
+
+    public InetSocketAddress getShardAwareContactPoint() {
+        return new InetSocketAddress(getHost(), getMappedPort(SHARD_AWARE_PORT));
+    }
+
+    public String getAlternatorEndpoint() {
+        if (!this.alternatorEnabled) {
+            throw new IllegalStateException("Alternator is not enabled");
+        }
+        return "http://" + getHost() + ":" + getMappedPort(ALTERNATOR_PORT);
+    }
+}
diff --git a/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java b/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java
new file mode 100644
index 00000000000..d7e4f921976
--- /dev/null
+++ b/modules/scylladb/src/test/java/org/testcontainers/scylladb/ScyllaDBContainerTest.java
@@ -0,0 +1,188 @@
+package org.testcontainers.scylladb;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import org.junit.Test;
+import org.testcontainers.containers.Container;
+import org.testcontainers.utility.DockerImageName;
+import org.testcontainers.utility.MountableFile;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition;
+import software.amazon.awssdk.services.dynamodb.model.BillingMode;
+import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
+import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
+import software.amazon.awssdk.services.dynamodb.model.KeyType;
+import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.CertificateException;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+public class ScyllaDBContainerTest {
+
+    private static final DockerImageName SCYLLADB_IMAGE = DockerImageName.parse("scylladb/scylla:6.2");
+
+    private static final String BASIC_QUERY = "SELECT release_version FROM system.local";
+
+    @Test
+    public void testSimple() {
+        try ( // container {
+            ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)
+            // }
+        ) {
+            scylladb.start();
+            // session {
+            CqlSession session = CqlSession
+                .builder()
+                .addContactPoint(scylladb.getContactPoint())
+                .withLocalDatacenter("datacenter1")
+                .build();
+            // }
+            ResultSet resultSet = session.execute(BASIC_QUERY);
+            assertThat(resultSet.wasApplied()).isTrue();
+            assertThat(resultSet.one().getString(0)).isNotNull();
+            assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+        }
+    }
+
+    @Test
+    public void testSimpleSsl()
+        throws InterruptedException, NoSuchAlgorithmException, KeyStoreException, IOException, CertificateException, UnrecoverableKeyException, KeyManagementException {
+        try (
+            // custom_configuration {
+            ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)
+                .withConfigurationOverride("scylla-test-ssl")
+            // }
+        ) {
+            // sslContext {
+            String testResourcesDir = getClass().getClassLoader().getResource("scylla-test-ssl/").getPath();
+
+            KeyStore keyStore = KeyStore.getInstance("PKCS12");
+            keyStore.load(Files.newInputStream(Paths.get(testResourcesDir + "keystore.node0")), "scylla".toCharArray());
+
+            KeyStore trustStore = KeyStore.getInstance("PKCS12");
+            trustStore.load(
+                Files.newInputStream(Paths.get(testResourcesDir + "truststore.node0")),
+                "scylla".toCharArray()
+            );
+
+            KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(
+                KeyManagerFactory.getDefaultAlgorithm()
+            );
+            keyManagerFactory.init(keyStore, "scylla".toCharArray());
+
+            TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(
+                TrustManagerFactory.getDefaultAlgorithm()
+            );
+            trustManagerFactory.init(trustStore);
+
+            SSLContext sslContext = SSLContext.getInstance("TLS");
+            sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
+            // }
+
+            scylladb.start();
+
+            CqlSession session = CqlSession
+                .builder()
+                .addContactPoint(scylladb.getContactPoint())
+                .withLocalDatacenter("datacenter1")
+                .withSslContext(sslContext)
+                .build();
+            ResultSet resultSet = session.execute(BASIC_QUERY);
+            assertThat(resultSet.wasApplied()).isTrue();
+            assertThat(resultSet.one().getString(0)).isNotNull();
+            assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+        }
+    }
+
+    @Test
+    public void testSimpleSslCqlsh() throws InterruptedException, IOException {
+        try (
+            ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)
+                .withConfigurationOverride("scylla-test-ssl")
+        ) {
+            scylladb.start();
+
+            // sslCqlsh {
+            scylladb.execInContainer("mv", "-f", "/etc/scylla/cqlshrc", "/root/.cassandra/cqlshrc");
+            Container.ExecResult execResult = scylladb.execInContainer("cqlsh", "--ssl", "-e", "select * from system_schema.keyspaces;");
+            assertThat(execResult.getStdout()).contains("keyspace_name");
+            // }
+        }
+    }
+
+    @Test
+    public void testShardAwareness() {
+        try (ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)) {
+            scylladb.start();
+            // shardAwarenessSession {
+            CqlSession session = CqlSession
+                .builder()
+                .addContactPoint(scylladb.getShardAwareContactPoint())
+                .withLocalDatacenter("datacenter1")
+                .build();
+            // }
+            ResultSet resultSet = session.execute("SELECT driver_name FROM system.clients");
+            assertThat(resultSet.one().getString(0)).isNotNull();
+            assertThat(session.getMetadata().getNodes().values()).hasSize(1);
+        }
+    }
+
+    @Test
+    public void testAlternator() {
+        try ( // alternator {
+            ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE).withAlternator()
+            // }
+        ) {
+            scylladb.start();
+
+            // dynamodDbClient {
+            DynamoDbClient client = DynamoDbClient
+                .builder()
+                .endpointOverride(URI.create(scylladb.getAlternatorEndpoint()))
+                .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test")))
+                .region(Region.US_EAST_1)
+                .build();
+            // }
+            client.createTable(
+                CreateTableRequest
+                    .builder()
+                    .tableName("demo_table")
+                    .keySchema(KeySchemaElement.builder().attributeName("id").keyType(KeyType.HASH).build())
+                    .attributeDefinitions(
+                        AttributeDefinition.builder().attributeName("id").attributeType(ScalarAttributeType.S).build()
+                    )
+                    .billingMode(BillingMode.PAY_PER_REQUEST)
+                    .build()
+            );
+            assertThat(client.listTables().tableNames()).containsExactly("demo_table");
+        }
+    }
+
+    @Test
+    public void throwExceptionWhenAlternatorDisabled() {
+        try (ScyllaDBContainer scylladb = new ScyllaDBContainer(SCYLLADB_IMAGE)) {
+            scylladb.start();
+            assertThatThrownBy(scylladb::getAlternatorEndpoint)
+                .isInstanceOf(IllegalStateException.class)
+                .hasMessageContaining("Alternator is not enabled");
+        }
+    }
+}
diff --git a/modules/scylladb/src/test/resources/logback-test.xml b/modules/scylladb/src/test/resources/logback-test.xml
new file mode 100644
index 00000000000..83ef7a1a3ef
--- /dev/null
+++ b/modules/scylladb/src/test/resources/logback-test.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<configuration>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} %-5level %logger - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+    <logger name="org.testcontainers" level="INFO"/>
+</configuration>
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/cqlshrc b/modules/scylladb/src/test/resources/scylla-test-ssl/cqlshrc
new file mode 100644
index 00000000000..5025c1a6d10
--- /dev/null
+++ b/modules/scylladb/src/test/resources/scylla-test-ssl/cqlshrc
@@ -0,0 +1,7 @@
+[connection]
+port = 9042
+hostname = 172.17.0.3
+factory = cqlshlib.ssl.ssl_transport_factory
+[ssl]
+certfile = /etc/scylla/node0.cer.pem
+validate = false
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/keystore.node0 b/modules/scylladb/src/test/resources/scylla-test-ssl/keystore.node0
new file mode 100644
index 00000000000..7f027beaf64
Binary files /dev/null and b/modules/scylladb/src/test/resources/scylla-test-ssl/keystore.node0 differ
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer
new file mode 100644
index 00000000000..619eccb0228
Binary files /dev/null and b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer differ
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer.pem b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer.pem
new file mode 100644
index 00000000000..8f538448288
--- /dev/null
+++ b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.cer.pem
@@ -0,0 +1,30 @@
+Bag Attributes
+ friendlyName: node0
+ localKeyID: 54 69 6D 65 20 31 37 33 35 39 34 30 37 38 39 31 39 34
+subject=C=None, L=None, O=None, OU=None, CN=None
+issuer=C=None, L=None, O=None, OU=None, CN=None
+-----BEGIN CERTIFICATE-----
+MIIEOzCCAqOgAwIBAgIIY4iVNsJSWiEwDQYJKoZIhvcNAQEMBQAwSzENMAsGA1UE
+BhMETm9uZTENMAsGA1UEBxMETm9uZTENMAsGA1UEChMETm9uZTENMAsGA1UECxME
+Tm9uZTENMAsGA1UEAxMETm9uZTAgFw0yNTAxMDMyMTM5MzFaGA8yMTI0MTIxMDIx
+MzkzMVowSzENMAsGA1UEBhMETm9uZTENMAsGA1UEBxMETm9uZTENMAsGA1UEChME
+Tm9uZTENMAsGA1UECxMETm9uZTENMAsGA1UEAxMETm9uZTCCAaIwDQYJKoZIhvcN
+AQEBBQADggGPADCCAYoCggGBAJuC18n+jlDcmR8CWxSK3fR2t1Am8P7IK5FY3ky8
+vEJSCMh+GoiqXVq67zhpOJnlgvEEZIDJGzBmJ/nIZvQwIAMxs792fHIEpEI2GTpf
+oaMf/9AAuPXuscg+5i4us1eVyVbrq3sREJ2NXHIPylcjtbwLjuepvmXTLp1d7oOJ
+Ad0X0W3UN/uwrlV3NPBuVLjJiCvJijWrCv1lFTuIcclqs478ozllp8UfcwJ57OH2
+Hq1ee9Ex9y7HouDPfFzmMRp1/jEcb0xbefpdW3Am6P9AXQuw2JMempwt5KbrAE+Z
+V1JnZCjSYSkspwid2bt5To/o60ypZUUswElasgAV/k8AxxDOkJGZusEqqVH7EFvk
+h3FiY/jb9cM1t5eLcpjx0wA+GOuErW3dgH5/WYugY2iiYjP1IQTb8Pk+gfAvq+2p
+SX3wISDCAh53j+aceUvNf+lItXsz66V9e+VH1xcOZcyO4gAMUVNYQFv/2wZ9knK4
+o30Aiqir1g2Hd5F/rWYNum+UbQIDAQABoyEwHzAdBgNVHQ4EFgQUqAWcYa3l/OHI
+JACasy+bZUwHP9kwDQYJKoZIhvcNAQEMBQADggGBAJQo55VJd8aEv6uiC5bKdACo
+M1GMvxWXUFzTdh2XKTOMF5GWwGJ3WRuW9o9wMZwXjvRihPfnx+DnfCCgZBOTGLXB
+3ObsogR9rij4uquUIkGJsshggY2gO82NVD7dRwGClncwTI+/RU7qGUym4SEdg6GP
+yfad3eTvqscQU1mNTxkaH0IDzPm0SWF8lcgGnrdHWlN+Nb8MJSHL5NFc9DA9pZck
+5/4MG1X8Hsk/UT04ln+8VrhYFkxkDv4fSKlr65slrst5721J0j+VLEwnuEl1onpW
+WHTTTIcOTDR5asrN9ZACCUsBxST8yfoJQ5G4HMO+UI1/1d928Ug6kHNWw2WR5FGG
+pJVu9vpTdA01MNkSeCuZhaPe2XgZcNPyHXcVxslNvFFZ0FVt6pSIhtmZ+4a8dRsm
+eU4NQ+PJ24En/8dErxaPqmi31wRZBg5Y9YlugJV4GQszCKHr0OYNK+Lpdq9dboUj
+6lxX7+gshUgKMzunUl/rTvddG7e/WuZbi9IvmJ4MYw==
+-----END CERTIFICATE-----
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/node0.key.pem b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.key.pem
new file mode 100644
index 00000000000..26ff1a8b80d
--- /dev/null
+++ b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.key.pem
@@ -0,0 +1,44 @@
+Bag Attributes
+ friendlyName: node0
+ localKeyID: 54 69 6D 65 20 31 37 33 35 39 34 30 37 38 39 31 39 34
+Key Attributes:
+-----BEGIN PRIVATE KEY-----
+MIIG/AIBADANBgkqhkiG9w0BAQEFAASCBuYwggbiAgEAAoIBgQCbgtfJ/o5Q3Jkf
+AlsUit30drdQJvD+yCuRWN5MvLxCUgjIfhqIql1auu84aTiZ5YLxBGSAyRswZif5
+yGb0MCADMbO/dnxyBKRCNhk6X6GjH//QALj17rHIPuYuLrNXlclW66t7ERCdjVxy
+D8pXI7W8C47nqb5l0y6dXe6DiQHdF9Ft1Df7sK5VdzTwblS4yYgryYo1qwr9ZRU7
+iHHJarOO/KM5ZafFH3MCeezh9h6tXnvRMfcux6Lgz3xc5jEadf4xHG9MW3n6XVtw
+Juj/QF0LsNiTHpqcLeSm6wBPmVdSZ2Qo0mEpLKcIndm7eU6P6OtMqWVFLMBJWrIA
+Ff5PAMcQzpCRmbrBKqlR+xBb5IdxYmP42/XDNbeXi3KY8dMAPhjrhK1t3YB+f1mL
+oGNoomIz9SEE2/D5PoHwL6vtqUl98CEgwgIed4/mnHlLzX/pSLV7M+ulfXvlR9cX
+DmXMjuIADFFTWEBb/9sGfZJyuKN9AIqoq9YNh3eRf61mDbpvlG0CAwEAAQKCAYAT
+SMt3qhB96I04cjNXPc0+ZoZe8yVJgwscEBgpDfKOitu5+SFTN0UyXiISLcIuG278
+cl4ANnAftVtZt0dFGr6thrlSkd/mx7qS12CTg45oyywO4DgPj1UOjvY+Xd4xi0qX
+c8wlC72yu/ft0RV3bt83fXtwMPWCbQjHzQEp4JCRmUWISBvVI1jLEmhHNHdfHua6
+/1gbRaWsPJ/AbTAnGQtBPQUEth1y7W52rSX582pkd2YFUBvl+i2xkSlL3+PQ8zar
+5giPYZrGh5pCu/bflAsBGZyRx9keSsRK/bzqE0xeRAwTOir2V6g7LbSKLC04xKNc
+06/rHf1gslHNNOC3SjHvPyPfTJFHG9Tm+J5OoGo/Rr/W+GNgFMsFJ1fIq1VedpTt
+ov4CBnBgew8uHTwCoiL6T7f/ttd206A6nhEZ9tWFf8v0o6+y6Z7g0VniU9IuLRLr
+hXuKkxbBDZQRO8equlAKtbkqv6YFbGImmF/1YwP1/Ct1TR1BDM3m1UB6eez7BWEC
+gcEAx2RL8dJCVbKoRMjsKqNNh0R3vIz0+S8PTi3yjFjhggUCWOzlwMVFv/y0ztGf
+pj6Y41eaIdTwQu76uZra748Uj1Vwj5zAKXhb/THWoAidONFRj+qJ3ylDobrO5Fme
+RiCFlIfjNc6wYQiGqSMXTF02O67to44G+4zsrz+syIZO3ANOR+uB+LUNqvFKL5Kk
+BUDtU+r9poIoXkgYylzRb/6H0J+D0fcPGg+LHeRvp3DL6uueDN7eGXxdy7hF/q3L
+DqHlAoHBAMepUZUe5m6h6wIYWoaXPwvSeuBSHWUiGEqoNCrA/1tBI49AOjfn6ccy
+vu51ng/hEI/XpQ+QXvM/MNk3wyKe3HMjaPKiRbro9EFtva3pz3SrLoRHHzSGkzW3
+iTavg8RKo76Pz7MNEVqfkFn0pYr85EMIe4hmmrdR6nwd1oJY1CEMf4wllhWG+v1y
+901xLisuRZFE/X4ASvyDyY0Nh+9Cfd+80QS9fpZwuCR+mHQvIpp89F/Ohqyhk9CU
+HLncQD2f6QKBwBJZUX/UeJRIV6HU157o3kaXb2ljk1unEAKCyfJOb5o2ecvTKSV/
+Qfbz+3OY6Nc0pX8uXZnFbcLLGTmhXYp0IVE7bJtasnhegiCfyH97q3RCFv5md/+Y
+XYfxl/59nMoZThGoG6mk9qhHT5UbDJbTcR028Nl/RXc6tcE+29isO2+VwktuCczo
+ZHSZtdkA5qUxH2X8lxEOo0Zh3h4pQoDK7JavR0M4OCSOz5+VmQzQnYNl4WqPy+KO
+hlcsAwz301rqXQKBwHkY2+9q924gbM4vgTBiqY19EqPdihCd1kfprwJDXl21q2Cm
+HulrkqILyDwPQFf3NLlZnLZM5Rn5uKH2rTbhTWnUD0IiY9KSmhrY+ZNy3S2w6Zy3
+GlkcSkrpT6LIX039y0S4Ksw5X84sOzwkIweijLuPeIVpXetUFrlCy6jxQW/uCaox
+3c6euLpiMVZaEBuGjBEo2+rBOLnhIKyZiVn3ZSr/dXK/j/ik0zrnQYYuVHmI0hsN
+wycPNPzr6GReDuSRiQKBwChoS1Vvv49agWjyViIohGm6GHsY1Y1FNIqddHN5KgfA
+LGZRm8JhlTBPX89KgWUpemDjRHw84vqF46Md9+eeuovr697/fEVQ1W4FWJs9JLej
+2zmRlZqQgFnR6hdeeg1l7V8bPLR1zfl0R7+UkguP1xuI55fZc9H5icMCrOOCo1ug
+vdBrhNl4Swzn+wTVY62J/GX86Rfeybvn+BJQW4RCuKFqcxqctPuR5i+wMOxKWZP3
+fMq1U6czbhYvEjp3Y42Exw==
+-----END PRIVATE KEY-----
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/node0.p12 b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.p12
new file mode 100644
index 00000000000..e6a2f5634ec
Binary files /dev/null and b/modules/scylladb/src/test/resources/scylla-test-ssl/node0.p12 differ
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml b/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml
new file mode 100644
index 00000000000..e71e807463b
--- /dev/null
+++ b/modules/scylladb/src/test/resources/scylla-test-ssl/scylla.yaml
@@ -0,0 +1,662 @@
+# Scylla storage config YAML
+
+#######################################
+# This file is split to two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: ",,"
+ - seeds: "172.17.0.3,127.0.0.1,172.17.0.2,172.17.0.4,172.17.0.5"
+
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
+listen_address: localhost
+
+# Address to broadcast to other Scylla nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# When using multiple physical network interfaces, set this to true to listen on broadcast_address
+# in addition to the listen_address, allowing nodes to communicate in both interfaces.
+# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2.
+#
+# listen_on_broadcast_address: false
+
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# To disable the CQL native transport, remove this option and configure native_transport_port_ssl.
+native_transport_port: 9042
+
+# Like native_transport_port, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+native_shard_aware_transport_port: 19042
+
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+#native_transport_port_ssl: 9142
+
+# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+#native_shard_aware_transport_port_ssl: 19142
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# how long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Scylla enough about your network topology to route
+# requests efficiently
+# - it allows Scylla to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Scylla will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Scylla provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+# - GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Scylla will switch to the private IP after
+# establishing a connection.)
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# The address or interface to bind the native transport server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: localhost
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for REST API server
+api_port: 10000
+
+# IP for the REST API server
+api_address: 127.0.0.1
+
+# Log WARN on any batch size exceeding this value. 128 kiB per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 128
+
+# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 1024
+
+ # Authentication backend, identifying users
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+ # PasswordAuthenticator}.
+ #
+ # - AllowAllAuthenticator performs no checks - set it to disable authentication.
+ # - PasswordAuthenticator relies on username/password pairs to authenticate
+ # users. It keeps usernames and hashed passwords in system_auth.credentials table.
+ # Please increase system_auth keyspace replication factor if you use this authenticator.
+ # - com.scylladb.auth.TransitionalAuthenticator requires username/password pair
+ # to authenticate in the same manner as PasswordAuthenticator, but improper credentials
+ # result in being logged in as an anonymous user. Use for upgrading clusters' auth.
+ # authenticator: AllowAllAuthenticator
+
+ # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+ # CassandraAuthorizer}.
+ #
+ # - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+ # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+ # increase system_auth keyspace replication factor if you use this authorizer.
+ # - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+ # authorizing permission management. Otherwise, it allows all. Use for upgrading
+ # clusters' auth.
+ # authorizer: AllowAllAuthorizer
+
+ # initial_token allows you to specify tokens manually. While you can use it with
+ # vnodes (num_tokens > 1, above) -- in which case you should provide a
+ # comma-separated list -- it's primarily used when adding nodes to legacy clusters
+ # that do not have vnodes enabled.
+ # initial_token:
+
+ # RPC address to broadcast to drivers and other Scylla nodes. This cannot
+ # be set to 0.0.0.0. If left blank, this will be set to the value of
+ # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+ # be set.
+ # broadcast_rpc_address: 1.2.3.4
+
+ # Uncomment to enable experimental features
+ # experimental_features:
+ # - udf
+ # - alternator-streams
+ # - broadcast-tables
+ # - keyspace-storage-options
+
+ # The directory where hints files are stored if hinted handoff is enabled.
+ # hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Scylla will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+#
+# A value of -1 (default) will automatically equate it to the total amount of memory
+# available for Scylla.
+commitlog_total_space_in_mb: -1
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# ssl_storage_port: 7001
+
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+# start_native_transport: true
+
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# enable or disable keepalive on rpc/native connections
+# rpc_keepalive: true
+
+# Set to true to have Scylla create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+# incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Scylla won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+# snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+# auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+# tombstone_warn_threshold: 1000
+# tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+# 1) a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# 2) but, Scylla will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+# column_index_size_in_kb: 64
+
+# Auto-scaling of the promoted index prevents running out of memory
+# when the promoted index grows too large (due to partitions with many rows
+# vs. too small column_index_size_in_kb). When the serialized representation
+# of the promoted index grows by this threshold, the desired block size
+# for this partition (initialized to column_index_size_in_kb)
+# is doubled, to decrease the sampling resolution by half.
+#
+# To disable promoted index auto-scaling, set the threshold to 0.
+# column_index_auto_scale_threshold_in_kb: 10240
+
+# Log a warning when writing partitions larger than this value
+# compaction_large_partition_warning_threshold_mb: 1000
+
+# Log a warning when writing rows larger than this value
+# compaction_large_row_warning_threshold_mb: 10
+
+# Log a warning when writing cells larger than this value
+# compaction_large_cell_warning_threshold_mb: 1
+
+# Log a warning when row number is larger than this value
+# compaction_rows_count_warning_threshold: 100000
+
+# Log a warning when writing a collection containing more elements than this value
+# compaction_collection_elements_count_warning_threshold: 10000
+
+# How long the coordinator should wait for seq or index scans to complete
+# range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+# counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+# cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+# truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+# request_timeout_in_ms: 10000
+
+# Enable or disable inter-node encryption.
+# You must also generate keys and provide the appropriate key and trust store locations and passwords.
+#
+# The available internode options are : all, none, dc, rack
+# If set to dc scylla will encrypt the traffic between the DCs
+# If set to rack scylla will encrypt the traffic between the racks
+#
+# SSL/TLS algorithm and ciphers used can be controlled by
+# the priority_string parameter. Info on priority string
+# syntax and values is available at:
+# https://gnutls.org/manual/html_node/Priority-Strings.html
+#
+# The require_client_auth parameter allows you to
+# restrict access to service based on certificate
+# validation. Client must provide a certificate
+# accepted by the used trust store to connect.
+#
+# server_encryption_options:
+# internode_encryption: none
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: true
+ certificate: /etc/scylla/node0.cer.pem
+ keyfile: /etc/scylla/node0.key.pem
+ truststore: /etc/scylla/truststore.node0
+ truststore_password: scylla
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enables inter-node traffic compression metrics (`scylla_rpc_compression_...`)
+# and enables a new implementation of inter-node traffic compressors,
+# capable of using zstd (in addition to the default lz4)
+# and shared dictionaries.
+# (Those features must still be enabled by other settings).
+# Has minor CPU cost.
+#
+# internode_compression_enable_advanced: false
+
+# Enables training of shared compression dictionaries on inter-node traffic.
+# New dictionaries are distributed throughout the cluster via Raft,
+# and used to improve the effectiveness of inter-node traffic compression
+# when `internode_compression_enable_advanced` is enabled.
+#
+# WARNING: this may leak unencrypted data to disk. The trained dictionaries
+# contain randomly-selected pieces of data written to the cluster.
+# When the Raft log is unencrypted, those pieces of data will be
+# written to disk unencrypted. At the moment of writing, there is no
+# way to encrypt the Raft log.
+# This problem is tracked by https://github.com/scylladb/scylla-enterprise/issues/4717.
+#
+# Can be: never - Dictionaries aren't trained by this node.
+# when_leader - New dictionaries are trained by this node only if
+# it's the current Raft leader.
+# always - Dictionaries are trained by this node unconditionally.
+#
+# For efficiency reasons, training shouldn't be enabled on more than one node.
+# To enable it on a single node, one can let the cluster pick the trainer
+# by setting `when_leader` on all nodes, or specify one manually by setting `always`
+# on one node and `never` on others.
+#
+# rpc_dict_training_when: never
+
+# A number in range [0.0, 1.0] specifying the share of CPU which can be spent
+# by this node on compressing inter-node traffic with zstd.
+#
+# Depending on the workload, enabling zstd might have a drastic negative
+# effect on performance, so it shouldn't be done lightly.
+#
+# internode_compression_zstd_max_cpu_fraction: 0.0
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Use on a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall-back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task started internally is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# Time for which task manager task started by user is kept in memory after it completes.
+# user_task_ttl_in_seconds: 3600
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+#   where <workdir> is the path defined by the workdir configuration option,
+# * <path>: the node will open the maintenance socket on the path <path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# Enable tablets for new keyspaces.
+# When enabled, newly created keyspaces will have tablets enabled by default.
+# That can be explicitly disabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': false}` replication option.
+#
+# Correspondingly, when disabled, newly created keyspaces will use vnodes
+# unless tablets are explicitly enabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': true}` replication option.
+#
+# Note that creating keyspaces with tablets enabled or disabled is irreversible.
+# The `tablets` option cannot be changed using `ALTER KEYSPACE`.
+enable_tablets: true
diff --git a/modules/scylladb/src/test/resources/scylla-test-ssl/truststore.node0 b/modules/scylladb/src/test/resources/scylla-test-ssl/truststore.node0
new file mode 100644
index 00000000000..a798f8dcce4
Binary files /dev/null and b/modules/scylladb/src/test/resources/scylla-test-ssl/truststore.node0 differ