
Disable legacy filesystem implementation by default #23343

Merged 1 commit on Sep 13, 2024
@@ -96,6 +96,7 @@ public void setupServer()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString())
.put("hive.security", "sql-standard")
.put("fs.hadoop.enabled", "true")
.buildOrThrow());
server.installPlugin(new BlackHolePlugin());
server.createCatalog("blackhole", "blackhole", ImmutableMap.of());
@@ -121,6 +121,7 @@ public void setupServer()
.put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString())
.put("hive.security", "sql-standard")
.put("bootstrap.quiet", "true")
.put("fs.hadoop.enabled", "true")
.buildOrThrow());

countingMockConnector = new CountingMockConnector();
@@ -17,7 +17,7 @@

public class FileSystemConfig
{
private boolean hadoopEnabled = true;
private boolean hadoopEnabled;
private boolean nativeAzureEnabled;
private boolean nativeS3Enabled;
private boolean nativeGcsEnabled;
@@ -28,7 +28,7 @@ public class TestFileSystemConfig
public void testDefaults()
{
assertRecordedDefaults(recordDefaults(FileSystemConfig.class)
.setHadoopEnabled(true)
.setHadoopEnabled(false)
.setNativeAzureEnabled(false)
.setNativeS3Enabled(false)
.setNativeGcsEnabled(false)
@@ -39,15 +39,15 @@ public void testDefaults()
public void testExplicitPropertyMappings()
{
Map<String, String> properties = ImmutableMap.<String, String>builder()
.put("fs.hadoop.enabled", "false")
.put("fs.hadoop.enabled", "true")
.put("fs.native-azure.enabled", "true")
.put("fs.native-s3.enabled", "true")
.put("fs.native-gcs.enabled", "true")
.put("fs.cache.enabled", "true")
.buildOrThrow();

FileSystemConfig expected = new FileSystemConfig()
.setHadoopEnabled(false)
.setHadoopEnabled(true)
.setNativeAzureEnabled(true)
.setNativeS3Enabled(true)
.setNativeGcsEnabled(true)
@@ -13,6 +13,7 @@
*/
package io.trino.plugin.deltalake;

import com.google.common.collect.ImmutableMap;
import io.trino.Session;
import io.trino.metastore.HiveMetastore;
import io.trino.plugin.deltalake.metastore.TestingDeltaLakeMetastoreModule;
@@ -65,11 +66,11 @@ protected QueryRunner createQueryRunner()
this.metastore = createTestMetastore(dataDirectory);

queryRunner.installPlugin(new TestingDeltaLakePlugin(dataDirectory, Optional.of(new TestingDeltaLakeMetastoreModule(metastore))));
queryRunner.createCatalog(DELTA_CATALOG_NAME, "delta_lake");
queryRunner.createCatalog(DELTA_CATALOG_NAME, "delta_lake", ImmutableMap.of("fs.hadoop.enabled", "true"));

queryRunner.installPlugin(new TestingHivePlugin(dataDirectory, metastore));

queryRunner.createCatalog(HIVE_CATALOG_NAME, "hive");
queryRunner.createCatalog(HIVE_CATALOG_NAME, "hive", ImmutableMap.of("fs.hadoop.enabled", "true"));
queryRunner.execute("CREATE SCHEMA " + SCHEMA);

return queryRunner;
@@ -161,6 +161,10 @@ public DistributedQueryRunner build()
if (!deltaProperties.containsKey("hive.metastore") && !deltaProperties.containsKey("hive.metastore.uri")) {
deltaProperties.put("hive.metastore", "file");
}

if (!deltaProperties.containsKey("fs.hadoop.enabled")) {
deltaProperties.put("fs.hadoop.enabled", "true");
}
queryRunner.createCatalog(DELTA_CATALOG, CONNECTOR_NAME, deltaProperties);

String schemaName = queryRunner.getDefaultSession().getSchema().orElseThrow();
@@ -105,6 +105,7 @@ protected PlanTester createPlanTester()
planTester.createCatalog(DELTA_CATALOG, "delta_lake", ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", baseDir.toString())
.put("fs.hadoop.enabled", "true")
.buildOrThrow());

HiveMetastore metastore = TestingDeltaLakeUtils.getConnectorService(planTester, HiveMetastoreFactory.class)
@@ -53,6 +53,7 @@ protected QueryRunner createQueryRunner()
.put("hive.metastore.catalog.dir", dataDirectory.toString())
.put("delta.enable-non-concurrent-writes", "true")
.put("delta.hive-catalog-name", "hive_with_redirections")
.put("fs.hadoop.enabled", "true")
.buildOrThrow();

queryRunner.createCatalog("delta_with_redirections", CONNECTOR_NAME, deltaLakeProperties);
@@ -67,6 +68,7 @@ protected QueryRunner createQueryRunner()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDirectory.toString())
.put("hive.delta-lake-catalog-name", "delta_with_redirections")
.put("fs.hadoop.enabled", "true")
.buildOrThrow());

queryRunner.execute("CREATE TABLE hive_with_redirections." + schema + ".hive_table (a_integer) WITH (format='PARQUET') AS VALUES 1, 2, 3");
@@ -63,14 +63,15 @@ protected QueryRunner createQueryRunner()
.put("hive.metastore", "glue")
.put("hive.metastore.glue.default-warehouse-dir", dataDirectory.toUri().toString())
.put("delta.hive-catalog-name", "hive_with_redirections")
.put("fs.hadoop.enabled", "true")
.buildOrThrow());

this.glueMetastore = createTestingGlueHiveMetastore(dataDirectory, this::closeAfterClass);
queryRunner.installPlugin(new TestingHivePlugin(queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data"), glueMetastore));
queryRunner.createCatalog(
"hive_with_redirections",
"hive",
ImmutableMap.of("hive.delta-lake-catalog-name", "delta_with_redirections"));
ImmutableMap.of("hive.delta-lake-catalog-name", "delta_with_redirections", "fs.hadoop.enabled", "true"));

queryRunner.execute("CREATE SCHEMA " + schema + " WITH (location = '" + dataDirectory.toUri() + "')");
queryRunner.execute("CREATE TABLE hive_with_redirections." + schema + ".hive_table (a_integer) WITH (format='PARQUET') AS VALUES 1, 2, 3");
@@ -109,6 +109,7 @@ public void setUp()
Map<String, String> config = ImmutableMap.<String, String>builder()
.put("hive.metastore", "glue")
.put("delta.hide-non-delta-lake-tables", "true")
.put("fs.hadoop.enabled", "true")
.buildOrThrow();

ConnectorContext context = new TestingConnectorContext();
@@ -222,6 +222,9 @@ public DistributedQueryRunner build()
hiveProperties.put("hive.metastore", "file");
hiveProperties.put("hive.metastore.catalog.dir", queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toString());
}
if (!hiveProperties.buildOrThrow().containsKey("fs.hadoop.enabled")) {
Member: Should we not use the new native file systems for most of the tests?

@anusudarsan (Member, Author), Sep 10, 2024: All those tests were already migrated to the native filesystems (including product tests) by explicitly setting fs.hadoop.enabled to false and enabling one of the S3/Azure/GCS implementations.
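For context, a migrated test typically creates its catalog along these lines. This is an illustrative sketch only: it assumes a DistributedQueryRunner named queryRunner as in the builders in this PR, and the catalog name and metastore URI are placeholders; only the property names (fs.hadoop.enabled, fs.native-s3.enabled) are the ones this PR touches.

// Illustrative sketch, not an actual test from the tree:
// disable the legacy Hadoop filesystem and opt in to the native S3 implementation.
queryRunner.createCatalog("hive_native_s3", "hive", ImmutableMap.of(
        "hive.metastore.uri", "thrift://example.invalid:9083",
        "fs.hadoop.enabled", "false",
        "fs.native-s3.enabled", "true"));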

Member: Cool.

Member: So why do we need to add this?

@anusudarsan (Member, Author): @electrum There are still tests with no fs.hadoop.enabled value set. When the default was changed to false, those tests failed with errors like "No factory set for location /tmp..". Hence this change.

Member: Should we change those tests to make sure they have fs.hadoop.enabled set to true instead?

@anusudarsan (Member, Author):

> Should we change those tests to make sure they have fs.hadoop.enabled set to true instead?

We could, but there would be a lot of them. I followed the pattern in the runner, where we default to the file metastore when no metastore is set.

@findinpath (Contributor), Sep 12, 2024:

> When the default was changed to false, tests fail with errors like "No factory set for location /tmp..".

Here is a stack trace:

io.trino.spi.TrinoException: Could not read database schema
	at io.trino.plugin.hive.metastore.file.FileHiveMetastore.readFile(FileHiveMetastore.java:1406)
	at io.trino.plugin.hive.metastore.file.FileHiveMetastore.readSchemaFile(FileHiveMetastore.java:1391)
	at io.trino.plugin.hive.metastore.file.FileHiveMetastore.getDatabase(FileHiveMetastore.java:285)
	at io.trino.plugin.hive.metastore.file.FileHiveMetastore.createDatabase(FileHiveMetastore.java:198)
	at io.trino.metastore.tracing.TracingHiveMetastore.lambda$createDatabase$9(TracingHiveMetastore.java:173)
	at io.trino.metastore.tracing.Tracing.lambda$withTracing$0(Tracing.java:31)
	at io.trino.metastore.tracing.Tracing.withTracing(Tracing.java:39)
	at io.trino.metastore.tracing.Tracing.withTracing(Tracing.java:30)
	at io.trino.metastore.tracing.TracingHiveMetastore.createDatabase(TracingHiveMetastore.java:173)
	at io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.createDatabase(CachingHiveMetastore.java:572)
	at io.trino.plugin.deltalake.TestDeltaLakeProjectionPushdownPlans.createPlanTester(TestDeltaLakeProjectionPushdownPlans.java:119)

It looks to me like we need a mapping for io.trino.filesystem.local.LocalFileSystemFactory in FileSystemModule, or a testing extension of FileSystemModule, in order to cope with local filesystem locations.
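A minimal sketch of that suggestion, assuming a Guice module registered alongside FileSystemModule for tests; the binder key, the "local" scheme, and the LocalFileSystemFactory constructor taking a root path are assumptions for illustration, not the actual Trino wiring:

// Hypothetical testing module; names and constructor signature are assumptions.
import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.multibindings.MapBinder;
import io.trino.filesystem.TrinoFileSystemFactory;
import io.trino.filesystem.local.LocalFileSystemFactory;

import java.nio.file.Path;

public class TestingLocalFileSystemModule
        implements Module
{
    private final Path rootPath;

    public TestingLocalFileSystemModule(Path rootPath)
    {
        this.rootPath = rootPath;
    }

    @Override
    public void configure(Binder binder)
    {
        // Register a factory for local locations so tests that use local paths
        // do not fail with "No factory set for location ..." once fs.hadoop.enabled defaults to false.
        MapBinder.newMapBinder(binder, String.class, TrinoFileSystemFactory.class)
                .addBinding("local")
                .toInstance(new LocalFileSystemFactory(rootPath));
    }
}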

Member: Supporting local fs would be cool anyway.

hiveProperties.put("fs.hadoop.enabled", "true");
}

queryRunner.installPlugin(new TestingHivePlugin(dataDir, metastore));

@@ -7626,7 +7626,8 @@ public void testDropCorruptedTableWithHiveRedirection()
"iceberg",
ImmutableMap.of(
"iceberg.catalog.type", "TESTING_FILE_METASTORE",
"hive.metastore.catalog.dir", dataDirectory.getPath()));
"hive.metastore.catalog.dir", dataDirectory.getPath(),
"fs.hadoop.enabled", "true"));

queryRunner.installPlugin(new TestingHivePlugin(dataDirectory.toPath()));
queryRunner.createCatalog(
@@ -148,6 +148,10 @@ public DistributedQueryRunner build()
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");

if (!icebergProperties.buildOrThrow().containsKey("fs.hadoop.enabled")) {
icebergProperties.put("fs.hadoop.enabled", "true");
}

Path dataDir = metastoreDirectory.map(File::toPath).orElseGet(() -> queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data"));
queryRunner.installPlugin(new TestingIcebergPlugin(dataDir));
queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", icebergProperties.buildOrThrow());
@@ -49,7 +49,8 @@ protected QueryRunner createQueryRunner()
queryRunner.createCatalog("iceberg2", "iceberg", Map.of(
"iceberg.catalog.type", "TESTING_FILE_METASTORE",
"hive.metastore.catalog.dir", queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg2-catalog").toString(),
"iceberg.hive-catalog-name", "hive"));
"iceberg.hive-catalog-name", "hive",
"fs.hadoop.enabled", "true"));

secondIceberg = Session.builder(queryRunner.getDefaultSession())
.setCatalog("iceberg2")
@@ -59,7 +60,8 @@ protected QueryRunner createQueryRunner()
"iceberg.catalog.type", "TESTING_FILE_METASTORE",
"hive.metastore.catalog.dir", queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toString(),
"iceberg.hive-catalog-name", "hive",
"iceberg.materialized-views.hide-storage-table", "false"));
"iceberg.materialized-views.hide-storage-table", "false",
"fs.hadoop.enabled", "true"));

queryRunner.execute(secondIceberg, "CREATE SCHEMA " + secondIceberg.getSchema().orElseThrow());

@@ -94,7 +94,7 @@ protected QueryRunner createQueryRunner()

dataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
queryRunner.installPlugin(new TestingIcebergPlugin(dataDir, Optional.of(new TestingIcebergFileMetastoreCatalogModule(metastore))));
queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of("iceberg.register-table-procedure.enabled", "true"));
queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of("fs.hadoop.enabled", "true", "iceberg.register-table-procedure.enabled", "true"));
queryRunner.execute("CREATE SCHEMA iceberg.tpch");
return queryRunner;
}
@@ -70,14 +70,16 @@ protected QueryRunner createQueryRunner()
"iceberg",
ImmutableMap.of(
"iceberg.catalog.type", "TESTING_FILE_METASTORE",
"hive.metastore.catalog.dir", dataDirectory.toString()));
"hive.metastore.catalog.dir", dataDirectory.toString(),
"fs.hadoop.enabled", "true"));
queryRunner.createCatalog(
"iceberg_with_redirections",
"iceberg",
ImmutableMap.of(
"iceberg.catalog.type", "TESTING_FILE_METASTORE",
"hive.metastore.catalog.dir", dataDirectory.toString(),
"iceberg.hive-catalog-name", "hive"));
"iceberg.hive-catalog-name", "hive",
"fs.hadoop.enabled", "true"));

queryRunner.installPlugin(new TestingHivePlugin(dataDirectory));
queryRunner.createCatalog(HIVE_CATALOG, "hive");
@@ -62,7 +62,8 @@ protected QueryRunner createQueryRunner()
.setIcebergProperties(
ImmutableMap.of(
"iceberg.catalog.type", "glue",
"hive.metastore.glue.default-warehouse-dir", schemaDirectory.getAbsolutePath()))
"hive.metastore.glue.default-warehouse-dir", schemaDirectory.getAbsolutePath(),
"fs.hadoop.enabled", "true"))
.setSchemaInitializer(
SchemaInitializer.builder()
.withClonedTpchTables(ImmutableList.of())
@@ -73,7 +74,8 @@ protected QueryRunner createQueryRunner()
queryRunner.createCatalog("iceberg_legacy_mv", "iceberg", Map.of(
"iceberg.catalog.type", "glue",
"hive.metastore.glue.default-warehouse-dir", schemaDirectory.getAbsolutePath(),
"iceberg.materialized-views.hide-storage-table", "false"));
"iceberg.materialized-views.hide-storage-table", "false",
"fs.hadoop.enabled", "true"));

queryRunner.installPlugin(createMockConnectorPlugin());
queryRunner.createCatalog("mock", "mock");
@@ -89,7 +89,7 @@ protected QueryRunner createQueryRunner()
dataDirectory.toFile().deleteOnExit();

queryRunner.installPlugin(new TestingIcebergPlugin(dataDirectory, Optional.of(new TestingIcebergGlueCatalogModule(awsGlueAsyncAdapterProvider))));
queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of());
queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of("fs.hadoop.enabled", "true"));

glueHiveMetastore = createTestingGlueHiveMetastore(dataDirectory, this::closeAfterClass);

@@ -84,22 +84,24 @@ protected QueryRunner createQueryRunner()
"iceberg",
ImmutableMap.of(
"iceberg.catalog.type", "glue",
"hive.metastore.glue.default-warehouse-dir", dataDirectory.toString()));
"hive.metastore.glue.default-warehouse-dir", dataDirectory.toString(),
"fs.hadoop.enabled", "true"));
queryRunner.createCatalog(
"iceberg_with_redirections",
"iceberg",
ImmutableMap.of(
"iceberg.catalog.type", "glue",
"hive.metastore.glue.default-warehouse-dir", dataDirectory.toString(),
"iceberg.hive-catalog-name", "hive"));
"iceberg.hive-catalog-name", "hive",
"fs.hadoop.enabled", "true"));

this.glueMetastore = createTestingGlueHiveMetastore(dataDirectory, this::closeAfterClass);
queryRunner.installPlugin(new TestingHivePlugin(queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data"), glueMetastore));
queryRunner.createCatalog(HIVE_CATALOG, "hive");
queryRunner.createCatalog(HIVE_CATALOG, "hive", ImmutableMap.of("fs.hadoop.enabled", "true"));
queryRunner.createCatalog(
"hive_with_redirections",
"hive",
ImmutableMap.of("hive.iceberg-catalog-name", "iceberg"));
ImmutableMap.of("hive.iceberg-catalog-name", "iceberg", "fs.hadoop.enabled", "true"));

queryRunner.execute("CREATE SCHEMA " + tpchSchema + " WITH (location = '" + dataDirectory.toUri() + "')");
copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, icebergSession, ImmutableList.of(TpchTable.NATION));
@@ -77,7 +77,7 @@ private void configureCompatibilityTestContainer(Environment.Builder builder, Co
.withCopyFileToContainer(forHostPath(dockerFiles.getDockerFilesHostPath(jvmConfig)), containerConfigDir + "jvm.config")
.withCopyFileToContainer(forHostPath(configDir.getPath(getConfigFileFor(dockerImage))), containerConfigDir + "config.properties")
.withCopyFileToContainer(forHostPath(configDir.getPath(getHiveConfigFor(dockerImage))), containerConfigDir + "catalog/hive.properties")
.withCopyFileToContainer(forHostPath(configDir.getPath("iceberg.properties")), containerConfigDir + "catalog/iceberg.properties")
.withCopyFileToContainer(forHostPath(configDir.getPath(getIcebergConfigFor(dockerImage))), containerConfigDir + "catalog/iceberg.properties")
.withCopyFileToContainer(forHostPath(dockerFiles.getDockerFilesHostPath()), "/docker/presto-product-tests")
.withStartupCheckStrategy(new IsRunningStartupCheckStrategy())
.waitingForAll(forLogMessage(".*======== SERVER STARTED ========.*", 1), forHealthcheck())
@@ -120,6 +120,14 @@ private String getHiveConfigFor(String dockerImage)
return "hive.properties";
}

private String getIcebergConfigFor(String dockerImage)
{
if (getVersionFromDockerImageName(dockerImage) < 359) {
return "iceberg_old.properties";
}
return "iceberg.properties";
}

private void configureTestsContainer(Environment.Builder builder, Config config)
{
int version = getVersionFromDockerImageName(config.getCompatibilityTestDockerImage());
@@ -11,3 +11,4 @@ hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
# Using smaller than default parquet.small-file-threshold to get better code coverage in tests
parquet.small-file-threshold=100kB
fs.hadoop.enabled=true
@@ -6,3 +6,4 @@ hive.hive-views.enabled=true
hive.timestamp-precision=NANOSECONDS
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true
@@ -10,3 +10,4 @@ hive.hive-views.enabled=true
hive.non-managed-table-writes-enabled=true
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true
@@ -8,3 +8,4 @@ hive.hive-views.run-as-invoker=true
hive.security=sql-standard
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true
@@ -3,3 +3,4 @@ hive.metastore.uri=thrift://hadoop-master:9083
hive.config.resources=/docker/presto-product-tests/conf/presto/etc/hive-default-fs-site.xml
iceberg.file-format=PARQUET
iceberg.register-table-procedure.enabled=true
fs.hadoop.enabled=true
@@ -1,2 +1,3 @@
connector.name=hive
hive.metastore.uri=thrift://host1.invalid:9083
fs.hadoop.enabled=true
@@ -1,6 +1,7 @@
connector.name=hive
hive.config.resources=/docker/presto-product-tests/conf/presto/etc/hive-default-fs-site.xml
hive.metastore.uri=thrift://hadoop-master:9083
fs.hadoop.enabled=true
fs.cache.enabled=true
fs.cache.directories=/tmp/cache/hive
fs.cache.max-disk-usage-percentages=90
@@ -4,6 +4,7 @@ hive.config.resources=/docker/presto-product-tests/conf/presto/etc/hive-default-
hive.metastore-cache-ttl=0s
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true

hive.metastore.authentication.type=KERBEROS
hive.metastore.service.principal=hive/[email protected]
@@ -1,6 +1,7 @@
connector.name=iceberg
hive.metastore.uri=thrift://hadoop-master:9083
hive.config.resources=/docker/presto-product-tests/conf/presto/etc/hive-default-fs-site.xml
fs.hadoop.enabled=true

hive.metastore.authentication.type=KERBEROS
hive.metastore.service.principal=hive/[email protected]
@@ -2,3 +2,4 @@ connector.name=hive
hive.metastore.uri=thrift://hadoop-master:9083
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true
@@ -1,2 +1,3 @@
connector.name=iceberg
hive.metastore.uri=thrift://hadoop-master:9083
fs.hadoop.enabled=true
@@ -0,0 +1,2 @@
connector.name=iceberg
hive.metastore.uri=thrift://hadoop-master:9083
@@ -1,5 +1,6 @@
connector.name=delta_lake
hive.metastore.uri=thrift://hadoop-master:9083
fs.hadoop.enabled=true

hive.metastore.authentication.type=KERBEROS
hive.metastore.service.principal=hive/[email protected]
@@ -10,3 +10,4 @@ hive.max-partitions-for-eager-load=100
hive.non-managed-table-writes-enabled=true
hive.parquet.time-zone=UTC
hive.rcfile.time-zone=UTC
fs.hadoop.enabled=true
@@ -1,6 +1,7 @@
connector.name=iceberg
hive.metastore.uri=thrift://hadoop-master:9083
hive.config.resources=/docker/presto-product-tests/conf/presto/etc/hive-default-fs-site.xml
fs.hadoop.enabled=true

hive.hdfs.authentication.type=NONE
hive.hdfs.impersonation.enabled=true