Skip to content

Commit

Permalink
Fix
Browse files Browse the repository at this point in the history
  • Loading branch information
viirya committed Jul 30, 2024
1 parent 0563a69 commit 4bd65c7
Showing 1 changed file with 30 additions and 10 deletions.
40 changes: 30 additions & 10 deletions dev/diffs/4.0.0-preview1.diff
Original file line number Diff line number Diff line change
@@ -426,7 +426,7 @@ index 16a493b5290..3f0b70e2d59 100644
assert(exchanges.size == 2)
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
index 2c24cc7d570..d46dc5e138a 100644
index 2c24cc7d570..4441ffc2f02 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
@@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
@@ -477,7 +477,17 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(100).select(
@@ -1187,7 +1194,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1027,7 +1034,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

- test("avoid reordering broadcast join keys to match input hash partitioning") {
+ test("avoid reordering broadcast join keys to match input hash partitioning",
+ IgnoreComet("TODO: Support SubqueryBroadcastExec in Comet: #242")) {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTable("large", "dimTwo", "dimThree") {
@@ -1187,7 +1195,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -487,7 +497,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
@@ -1238,7 +1246,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1238,7 +1247,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -497,7 +507,7 @@ index 2c24cc7d570..d46dc5e138a 100644
Given("dynamic pruning filter on the build side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
@@ -1311,7 +1320,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1311,7 +1321,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -507,7 +517,17 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true",
@@ -1471,7 +1481,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1424,7 +1435,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

- test("SPARK-34637: DPP side broadcast query stage is created firstly") {
+ test("SPARK-34637: DPP side broadcast query stage is created firstly",
+ IgnoreComet("TODO: Support SubqueryBroadcastExec in Comet: #242")) {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
""" WITH v as (
@@ -1471,7 +1483,8 @@ abstract class DynamicPartitionPruningSuiteBase
checkAnswer(df, Row(3, 2) :: Row(3, 2) :: Row(3, 2) :: Row(3, 2) :: Nil)
}

@@ -517,7 +537,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
@@ -1486,7 +1497,7 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1486,7 +1499,7 @@ abstract class DynamicPartitionPruningSuiteBase
}

test("SPARK-38148: Do not add dynamic partition pruning if there exists static partition " +
@@ -526,7 +546,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
Seq(
"f.store_id = 1" -> false,
@@ -1558,7 +1569,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1558,7 +1571,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -536,7 +556,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withTable("duplicate_keys") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
Seq[(Int, String)]((1, "NL"), (1, "NL"), (3, "US"), (3, "US"), (3, "US"))
@@ -1589,7 +1601,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1589,7 +1603,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -546,7 +566,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
@@ -1618,7 +1631,8 @@ abstract class DynamicPartitionPruningSuiteBase
@@ -1618,7 +1633,8 @@ abstract class DynamicPartitionPruningSuiteBase
}
}

@@ -556,7 +576,7 @@ index 2c24cc7d570..d46dc5e138a 100644
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
@@ -1730,6 +1744,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
@@ -1730,6 +1746,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
case s: BatchScanExec =>
// we use f1 col for v2 tables due to schema pruning
s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
Expand Down

0 comments on commit 4bd65c7

Please sign in to comment.