diff --git a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
index ca5b39fff1ace..56b8f056bc258 100644
--- a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
+++ b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
@@ -74,7 +74,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     FileUtils.deleteDirectory(new File(HDFS_METADATA_PATH))
   }
 
-  ignore("test mergetree table write") {
+  test("test mergetree table write") {
     spark.sql(s"""
                  |DROP TABLE IF EXISTS lineitem_mergetree_hdfs;
                  |""".stripMargin)
@@ -157,7 +157,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     spark.sql("drop table lineitem_mergetree_hdfs")
   }
 
-  ignore("test mergetree write with orderby keys / primary keys") {
+  test("test mergetree write with orderby keys / primary keys") {
     spark.sql(s"""
                  |DROP TABLE IF EXISTS lineitem_mergetree_orderbykey_hdfs;
                  |""".stripMargin)
@@ -254,7 +254,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     spark.sql("drop table lineitem_mergetree_orderbykey_hdfs")
   }
 
-  ignore("test mergetree write with partition") {
+  test("test mergetree write with partition") {
     spark.sql(s"""
                  |DROP TABLE IF EXISTS lineitem_mergetree_partition_hdfs;
                  |""".stripMargin)
@@ -435,7 +435,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     spark.sql("drop table lineitem_mergetree_partition_hdfs")
   }
 
-  ignore("test mergetree write with bucket table") {
+  test("test mergetree write with bucket table") {
     spark.sql(s"""
                  |DROP TABLE IF EXISTS lineitem_mergetree_bucket_hdfs;
                  |""".stripMargin)
@@ -537,7 +537,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     spark.sql("drop table lineitem_mergetree_bucket_hdfs")
   }
 
-  ignore("test mergetree write with the path based") {
+  test("test mergetree write with the path based") {
     val dataPath = s"$HDFS_URL/test/lineitem_mergetree_bucket_hdfs"
 
     val sourceDF = spark.sql(s"""
diff --git a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenDiskHDFS.cpp b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenDiskHDFS.cpp
index cdbe6c72897c6..e68c51553abd3 100644
--- a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenDiskHDFS.cpp
+++ b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenDiskHDFS.cpp
@@ -40,8 +40,8 @@ String GlutenDiskHDFS::path2AbsPath(const String & path)
 void GlutenDiskHDFS::createDirectories(const String & path)
 {
     DiskObjectStorage::createDirectories(path);
-    auto* hdfs = hdfs_object_storage->getHDFSFS();
-    fs::path p = path;
+    auto * hdfs = hdfs_object_storage->getHDFSFS();
+    fs::path p = "/" + path;
     std::vector<std::string> paths_created;
     while (hdfsExists(hdfs, p.c_str()) < 0)
     {
diff --git a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.cpp b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.cpp
index 60b82ec845bb4..cab87d66d8848 100644
--- a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.cpp
+++ b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.cpp
@@ -38,7 +38,7 @@ DB::ObjectStorageKey local_engine::GlutenHDFSObjectStorage::generateObjectKeyFor
     initializeHDFSFS();
     /// what ever data_source_description.description value is, consider that key as relative key
     chassert(data_directory.starts_with("/"));
-    return ObjectStorageKey::createAsRelative(fs::path(url_without_path) / data_directory.substr(1) / path);
+    return ObjectStorageKey::createAsRelative(fs::path(url_without_path) / data_directory.substr(1), path);
 }
 }
 #endif
diff --git a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.h b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.h
index a532c98cb87d5..149ed3848aabd 100644
--- a/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.h
+++ b/cpp-ch/local-engine/Disks/ObjectStorages/GlutenHDFSObjectStorage.h
@@ -33,7 +33,7 @@ class GlutenHDFSObjectStorage final : public DB::HDFSObjectStorage
         const String & hdfs_root_path_,
         SettingsPtr settings_,
         const Poco::Util::AbstractConfiguration & config_)
-        : HDFSObjectStorage(hdfs_root_path_, std::move(settings_), config_, /* lazy_initialize */true), config(config_)
+        : HDFSObjectStorage(hdfs_root_path_, std::move(settings_), config_, /* lazy_initialize */false), config(config_)
     {
     }
     std::unique_ptr<ReadBufferFromFileBase> readObject( /// NOLINT
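Side note on the "/" + path change in GlutenDiskHDFS::createDirectories: the loop there walks up the directory chain with fs::path::parent_path() until hdfsExists() succeeds, so it presumably matters where that walk bottoms out. The standalone sketch below (not part of the patch; the walk() helper and the sample "user/hive/warehouse/t1" path are hypothetical, and the HDFS calls are left out) only shows the std::filesystem behavior: a relative path walks down to an empty path, while the "/"-prefixed path terminates at the root "/".

// sketch: how parent_path() bottoms out for relative vs. absolute paths
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Print each ancestor the way the createDirectories loop would visit it.
static void walk(fs::path p)
{
    while (!p.empty())
    {
        std::cout << "checking: '" << p.string() << "'\n";
        if (p == p.parent_path())   // reached "/" (root is its own parent), stop
            break;
        p = p.parent_path();
    }
}

int main()
{
    std::string path = "user/hive/warehouse/t1"; // hypothetical table directory
    walk(path);        // relative: stops after 'user', never reaches a root
    walk("/" + path);  // absolute (as in the patch): ends at '/'
}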