Commit 2f0f455: Fix
viirya committed Jul 29, 2024
1 parent 3f841c0 commit 2f0f455
Showing 1 changed file with 30 additions and 10 deletions.
dev/diffs/3.5.1.diff (40 changes: 30 additions & 10 deletions)
@@ -342,7 +342,7 @@ index c2fe31520ac..0f54b233d14 100644
assert(exchanges.size == 2)
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
-index f33432ddb6f..9cf7a9dd4e3 100644
+index f33432ddb6f..aef20714168 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
@@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
@@ -393,7 +393,17 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(100).select(
-@@ -1187,7 +1194,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1027,7 +1034,8 @@ abstract class DynamicPartitionPruningSuiteBase
+     }
+   }
+
+-  test("avoid reordering broadcast join keys to match input hash partitioning") {
++  test("avoid reordering broadcast join keys to match input hash partitioning",
++    IgnoreComet("TODO: Support SubqueryBroadcastExec in Comet: #242")) {
+    withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
+      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+      withTable("large", "dimTwo", "dimThree") {
+@@ -1187,7 +1195,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -403,7 +413,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
-@@ -1238,7 +1246,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1238,7 +1247,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -413,7 +423,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
Given("dynamic pruning filter on the build side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
-@@ -1311,7 +1320,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1311,7 +1321,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -423,7 +433,17 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true",
-@@ -1470,7 +1480,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1423,7 +1434,8 @@ abstract class DynamicPartitionPruningSuiteBase
+     }
+   }
+
+-  test("SPARK-34637: DPP side broadcast query stage is created firstly") {
++  test("SPARK-34637: DPP side broadcast query stage is created firstly",
++    IgnoreComet("TODO: Support SubqueryBroadcastExec in Comet: #242")) {
+    withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
+      val df = sql(
+        """ WITH v as (
+@@ -1470,7 +1482,8 @@ abstract class DynamicPartitionPruningSuiteBase
checkAnswer(df, Row(3, 2) :: Row(3, 2) :: Row(3, 2) :: Row(3, 2) :: Nil)
}

@@ -433,7 +453,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
-@@ -1485,7 +1496,7 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1485,7 +1498,7 @@ abstract class DynamicPartitionPruningSuiteBase
}

test("SPARK-38148: Do not add dynamic partition pruning if there exists static partition " +
@@ -442,7 +462,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
Seq(
"f.store_id = 1" -> false,
-@@ -1557,7 +1568,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1557,7 +1570,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -452,7 +472,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withTable("duplicate_keys") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
Seq[(Int, String)]((1, "NL"), (1, "NL"), (3, "US"), (3, "US"), (3, "US"))
-@@ -1588,7 +1600,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1588,7 +1602,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -462,7 +482,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
-@@ -1617,7 +1630,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1617,7 +1632,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -472,7 +492,7 @@ index f33432ddb6f..9cf7a9dd4e3 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
-@@ -1729,6 +1743,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
+@@ -1729,6 +1745,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
case s: BatchScanExec =>
// we use f1 col for v2 tables due to schema pruning
s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
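For context, dev/diffs/3.5.1.diff is the patch applied to the Apache Spark 3.5.1 sources so that Spark's own test suites can run against Comet; each hunk above tags another DynamicPartitionPruningSuite test with IgnoreComet so it is skipped until SubqueryBroadcastExec is supported (#242). Below is a minimal ScalaTest sketch of how a tag like IgnoreComet can be defined and then excluded at run time. This is an illustration only: the real IgnoreComet definition lives in Comet's test sources, and the fully qualified tag name and example suite here are assumptions.

// Sketch under stated assumptions; not Comet's actual definition.
import org.scalatest.Tag
import org.scalatest.funsuite.AnyFunSuite

// ScalaTest matches tags by the name passed to Tag; the reason string is
// carried only as documentation (e.g. a tracking-issue number).
case class IgnoreComet(reason: String) extends Tag("org.apache.comet.IgnoreComet")

class ExampleSuite extends AnyFunSuite {
  // Tagged the same way as the tests in the patch above.
  test("query that Comet cannot yet plan", IgnoreComet("TODO: #242")) {
    assert(1 + 1 == 2)
  }
}

// The tagged tests can then be excluded when running against Comet, e.g.:
//   testOnly *ExampleSuite -- -l org.apache.comet.IgnoreComet

Because ScalaTest only compares tag names, attaching a reason string costs nothing at run time, which is why the patch can embed a TODO and issue number directly in each tag.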
