Skip to content

Commit

Permalink
Workaround fix
Browse files Browse the repository at this point in the history
  • Loading branch information
PHILO-HE committed Aug 23, 2024
1 parent cae0bf8 commit b2137b5
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -114,16 +114,19 @@ class GlutenInsertSuite
}
}

testGluten("Cleanup staging files if job is failed") {
withTable("t1") {
spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
testGluten("Cleanup staging files if job failed") {
// Use a table name unique to this test. Sometimes the table is not removed, for reasons
// unknown, which can cause a test failure ("location already exists") if a subsequent test
// uses the same table name.
withTable("tbl") {
spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
assert(new File(table.location).list().length == 0)

intercept[Exception] {
spark.sql(
"""
|INSERT INTO TABLE t1
|INSERT INTO TABLE tbl
|SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
|""".stripMargin
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,16 +116,19 @@ class GlutenInsertSuite
}
}

testGluten("Cleanup staging files if job is failed") {
withTable("t1") {
spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
testGluten("Cleanup staging files if job failed") {
// Use a table name unique to this test. Sometimes the table is not removed, for reasons
// unknown, which can cause a test failure ("location already exists") if a subsequent test
// uses the same table name.
withTable("tbl") {
spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
assert(new File(table.location).list().length == 0)

intercept[Exception] {
spark.sql(
"""
|INSERT INTO TABLE t1
|INSERT INTO TABLE tbl
|SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
|""".stripMargin
)
Expand Down

0 comments on commit b2137b5

Please sign in to comment.