From b2137b5782a667d5cc3aff2066cb1961f2c45ef6 Mon Sep 17 00:00:00 2001
From: PHILO-HE
Date: Fri, 23 Aug 2024 11:10:51 +0800
Subject: [PATCH] Workaround: use a unique table name in the staging-file cleanup test

---
 .../spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
 .../spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index db90df14e3fdd..290a8fa18a519 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -114,16 +114,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Use a unique table name in this test. Sometimes the table is not removed, for an unknown
+    // reason, which can cause a test failure (location already exists) if a later test uses the
+    // same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index dd0567f546031..dc97be44cb701 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -116,16 +116,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Use a unique table name in this test. Sometimes the table is not removed, for an unknown
+    // reason, which can cause a test failure (location already exists) if a later test uses the
+    // same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
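
The workaround above renames the table to a fixed name ("tbl"). If the leftover-location problem recurs, one option is to make the name unique per run instead. A minimal sketch of that idea, relying on the suite's existing withTable and spark helpers; TestTableNames and the "cleanup_staging" prefix are hypothetical and not part of this patch:

  import java.util.UUID

  object TestTableNames {
    // Hypothetical helper, not part of this patch: build a table name that is unique
    // per call, so a table location left behind by an earlier failed cleanup cannot
    // collide with a later test that would otherwise reuse the same name.
    def unique(prefix: String): String =
      s"${prefix}_${UUID.randomUUID().toString.replace("-", "")}"
  }

  // Possible use inside GlutenInsertSuite (sketch only):
  //   val tbl = TestTableNames.unique("cleanup_staging")
  //   withTable(tbl) {
  //     spark.sql(s"CREATE TABLE $tbl (c1 int, c2 string) USING PARQUET")
  //     ...
  //   }

A random suffix trades readable test names for isolation: even if a previous run leaves its table location behind, the next run creates the table at a fresh path.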