Enable mergetree hdfs suite
loneylee committed Jun 13, 2024
1 parent 00fee1d · commit aac3316
Showing 4 changed files with 9 additions and 9 deletions.
@@ -74,7 +74,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
FileUtils.deleteDirectory(new File(HDFS_METADATA_PATH))
}

ignore("test mergetree table write") {
test("test mergetree table write") {
spark.sql(s"""
|DROP TABLE IF EXISTS lineitem_mergetree_hdfs;
|""".stripMargin)
@@ -157,7 +157,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
spark.sql("drop table lineitem_mergetree_hdfs")
}

ignore("test mergetree write with orderby keys / primary keys") {
test("test mergetree write with orderby keys / primary keys") {
spark.sql(s"""
|DROP TABLE IF EXISTS lineitem_mergetree_orderbykey_hdfs;
|""".stripMargin)
@@ -254,7 +254,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
spark.sql("drop table lineitem_mergetree_orderbykey_hdfs")
}

ignore("test mergetree write with partition") {
test("test mergetree write with partition") {
spark.sql(s"""
|DROP TABLE IF EXISTS lineitem_mergetree_partition_hdfs;
|""".stripMargin)
@@ -435,7 +435,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
spark.sql("drop table lineitem_mergetree_partition_hdfs")
}

ignore("test mergetree write with bucket table") {
test("test mergetree write with bucket table") {
spark.sql(s"""
|DROP TABLE IF EXISTS lineitem_mergetree_bucket_hdfs;
|""".stripMargin)
@@ -537,7 +537,7 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
spark.sql("drop table lineitem_mergetree_bucket_hdfs")
}

ignore("test mergetree write with the path based") {
test("test mergetree write with the path based") {
val dataPath = s"$HDFS_URL/test/lineitem_mergetree_bucket_hdfs"

val sourceDF = spark.sql(s"""
cpp-ch/local-engine/Disks/ObjectStorages/GlutenDiskHDFS.cpp (4 changes: 2 additions & 2 deletions)
@@ -40,8 +40,8 @@ String GlutenDiskHDFS::path2AbsPath(const String & path)
void GlutenDiskHDFS::createDirectories(const String & path)
{
DiskObjectStorage::createDirectories(path);
-auto* hdfs = hdfs_object_storage->getHDFSFS();
-fs::path p = path;
+auto * hdfs = hdfs_object_storage->getHDFSFS();
+fs::path p = "/" + path;
std::vector<std::string> paths_created;
while (hdfsExists(hdfs, p.c_str()) < 0)
{
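The switch from `fs::path p = path` to `fs::path p = "/" + path` makes the probe path absolute: libhdfs resolves relative paths against the HDFS working directory, so an existence check on the disk-relative path can disagree with where the directories were actually created. A minimal sketch of the walk-up pattern this hunk feeds into (the loop body is truncated in the diff above, so the mkdir pass and the include path are assumptions, not the repository's exact code):

```cpp
#include <filesystem>
#include <string>
#include <vector>

#include <hdfs/hdfs.h> // libhdfs C API; the include path varies by build

namespace fs = std::filesystem;

void createDirectoriesOnHdfs(hdfsFS hdfs, const std::string & path)
{
    // Leading "/" makes the path absolute; a relative path would be
    // resolved against the HDFS working directory instead of the root.
    fs::path p = "/" + path;

    // Walk upward, collecting every component that does not exist yet.
    std::vector<std::string> paths_created;
    while (hdfsExists(hdfs, p.c_str()) < 0) // hdfsExists returns 0 if present
    {
        paths_created.push_back(p.string());
        p = p.parent_path();
    }

    // Create the missing components parent-first.
    for (auto it = paths_created.rbegin(); it != paths_created.rend(); ++it)
        hdfsCreateDirectory(hdfs, it->c_str());
}
```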
@@ -38,7 +38,7 @@ DB::ObjectStorageKey local_engine::GlutenHDFSObjectStorage::generateObjectKeyForPath
initializeHDFSFS();
/// whatever data_source_description.description value is, consider that key as relative key
chassert(data_directory.starts_with("/"));
-return ObjectStorageKey::createAsRelative(fs::path(url_without_path) / data_directory.substr(1) / path);
+return ObjectStorageKey::createAsRelative(fs::path(url_without_path) / data_directory.substr(1), path);
}
}
#endif
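The `generateObjectKeyForPath` change is subtler than it looks: the old code folded `path` into one joined key, while the new code uses the two-argument `ObjectStorageKey::createAsRelative(prefix, suffix)` overload. Assuming the semantics of ClickHouse's `ObjectStorageKey` (src/Common/ObjectStorageKey.h), the serialized path comes out the same either way, but only the two-argument form keeps the endpoint-plus-data-directory root as the key's prefix and `path` as its suffix, so callers that need the object's name relative to the storage root can still recover it. A hedged illustration (the example values are made up):

```cpp
#include <Common/ObjectStorageKey.h> // ClickHouse header
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

void illustrate()
{
    // Stand-ins for url_without_path / data_directory in the real code.
    fs::path root = fs::path("hdfs-host:9000") / "data/gluten";
    const std::string path = "part-0.bin";

    // Before: one joined key; the whole string becomes the suffix.
    auto before = DB::ObjectStorageKey::createAsRelative(root / path);

    // After: root is the prefix, and `path` stays addressable as the suffix.
    auto after = DB::ObjectStorageKey::createAsRelative(root, path);

    // before.serialize() == after.serialize(), but only `after` reports
    // after.getSuffix() == "part-0.bin".
}
```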
@@ -33,7 +33,7 @@ class GlutenHDFSObjectStorage final : public DB::HDFSObjectStorage
const String & hdfs_root_path_,
SettingsPtr settings_,
const Poco::Util::AbstractConfiguration & config_)
-: HDFSObjectStorage(hdfs_root_path_, std::move(settings_), config_, /* lazy_initialize */true), config(config_)
+: HDFSObjectStorage(hdfs_root_path_, std::move(settings_), config_, /* lazy_initialize */false), config(config_)
{
}
std::unique_ptr<DB::ReadBufferFromFileBase> readObject( /// NOLINT
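Flipping `lazy_initialize` from `true` to `false` makes the HDFS filesystem handle connect eagerly in the constructor rather than on first use. The commit does not state the motivation, but a plausible reading is that callers such as `GlutenDiskHDFS::createDirectories` fetch the handle via `getHDFSFS()` right away, so deferred initialization risks handing out an unconnected client; eager initialization also surfaces bad HDFS configuration at disk-creation time. A minimal sketch of the pattern with illustrative names (not the real `HDFSObjectStorage` internals):

```cpp
// Eager-vs-lazy initialization, reduced to its skeleton.
class HdfsBackedStorage
{
public:
    explicit HdfsBackedStorage(bool lazy_initialize)
    {
        if (!lazy_initialize)
            initializeFS(); // eager: connect now, fail fast on bad config
    }

    void use()
    {
        initializeFS(); // lazy path: connect on first use
        // ... perform I/O with the connected handle ...
    }

private:
    void initializeFS()
    {
        if (initialized)
            return;
        // The real code would call hdfsBuilderConnect(...) here.
        initialized = true;
    }

    bool initialized = false;
};
```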
