[Performance Improvement] Support for AQE mode via delayed query pushdown for optimal runtime and improved debugging #535
Open: jalpan-randeri wants to merge 1 commit into snowflakedb:master from jalpan-randeri:jalpan/aqe-improvement
src/main/scala/net/snowflake/spark/snowflake/pushdowns/SnowflakeScanExec.scala (75 additions, 0 deletions)
```scala
package net.snowflake.spark.snowflake.pushdowns

import net.snowflake.spark.snowflake.{SnowflakeRelation, SnowflakeSQLStatement}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeProjection}
import org.apache.spark.sql.execution.LeafExecNode

import java.util.concurrent.{Callable, ExecutorService, Executors, Future}

/**
 * Snowflake scan plan for pushing a query fragment to the Snowflake endpoint.
 *
 * @param projection   projected columns
 * @param snowflakeSQL SQL query that is pushed to Snowflake for evaluation
 * @param relation     Snowflake data source
 */
case class SnowflakeScanExec(projection: Seq[Attribute],
                             snowflakeSQL: SnowflakeSQLStatement,
                             relation: SnowflakeRelation) extends LeafExecNode {
  // holds the asynchronously computed scan result
  @transient private var data: Future[PushDownResult] = _
  @transient private val service: ExecutorService = Executors.newCachedThreadPool()

  override protected def doPrepare(): Unit = {
    logInfo(s"Preparing query to push down - $snowflakeSQL")

    val work = new Callable[PushDownResult]() {
      override def call(): PushDownResult = {
        try {
          val data = relation.buildScanFromSQL[InternalRow](snowflakeSQL, Some(schema))
          PushDownResult(data = Some(data))
        } catch {
          case e: Exception =>
            logError("Failure in query execution", e)
            PushDownResult(failure = Some(e))
        }
      }
    }
    data = service.submit(work)
    logInfo("Submitted query asynchronously")
  }

  override protected def doExecute(): RDD[InternalRow] = {
    if (data.get().failure.nonEmpty) {
      // re-throw the original exception captured on the background thread
      throw data.get().failure.get
    }

    data.get().data.get.mapPartitions { iter =>
      val project = UnsafeProjection.create(schema)
      iter.map(project)
    }
  }

  override def cleanupResources(): Unit = {
    logDebug("Shutting down service to clean up resources")
    service.shutdown()
  }

  override def output: Seq[Attribute] = projection
}

/**
 * Result holder.
 *
 * @param data    RDD that holds the data
 * @param failure failure information if we are unable to push down
 */
private case class PushDownResult(data: Option[RDD[InternalRow]] = None,
                                  failure: Option[Exception] = None)
  extends Serializable
```
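For readers unfamiliar with the `mapPartitions` body above, here is a minimal standalone sketch of what `UnsafeProjection.create(schema)` does: it compiles a projection that converts generic `InternalRow`s into `UnsafeRow`s matching the scan's schema. The schema and row values are illustrative, not from the PR; it assumes spark-catalyst on the classpath.

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeProjection
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String

object ProjectionSketch {
  def main(args: Array[String]): Unit = {
    // a one-column schema, standing in for the scan's output schema
    val schema = StructType(Seq(StructField("name", StringType)))
    val project = UnsafeProjection.create(schema)

    // convert a generic row into a byte-backed UnsafeRow
    val row: InternalRow = InternalRow(UTF8String.fromString("alice"))
    val unsafe = project(row)
    println(unsafe.getUTF8String(0)) // prints: alice
  }
}
```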
src/test/scala/net/snowflake/spark/snowflake/SparkQuerySuite.scala (62 additions, 0 deletions)
```scala
package net.snowflake.spark.snowflake

import net.snowflake.spark.snowflake.pushdowns.SnowflakeScanExec
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.FormattedMode
import org.scalatest.{BeforeAndAfter, FunSuite}

class SparkQuerySuite extends FunSuite with BeforeAndAfter {
  private var spark: SparkSession = _

  before {
    spark = SparkSession
      .builder()
      .master("local[2]")
      .getOrCreate()
  }

  after {
    spark.stop()
  }

  test("pushdown scan to snowflake") {
    // register a Snowflake-backed table (dummy connection options)
    spark.sql(
      """
        CREATE TABLE student(name string)
        USING net.snowflake.spark.snowflake
        OPTIONS (dbtable 'default.student',
          sfdatabase 'sf-db',
          tempdir '/tmp/dir',
          sfurl 'accountname.snowflakecomputing.com:443',
          sfuser 'alice',
          sfpassword 'hello-snowflake')
      """).show()

    val df = spark.sql(
      """|SELECT *
         |  FROM student
         |""".stripMargin)
    val plan = df.queryExecution.executedPlan

    assert(plan.isInstanceOf[SnowflakeScanExec])
    val sfPlan = plan.asInstanceOf[SnowflakeScanExec]
    assert(sfPlan.snowflakeSQL.toString ==
      """SELECT * FROM ( default.student ) AS "SF_CONNECTOR_QUERY_ALIAS"""")

    // explain plan
    val planString = df.queryExecution.explainString(FormattedMode)
    val expectedString =
      """== Physical Plan ==
        |SnowflakeScan (1)
        |
        |
        |(1) SnowflakeScan
        |Output [1]: [name#1]
        |Arguments: [name#1], SELECT * FROM ( default.student ) AS "SF_CONNECTOR_QUERY_ALIAS", SnowflakeRelation
      """.stripMargin
    assert(planString.trim == expectedString.trim)
  }
}
```
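Beyond the assertions above, the same pattern is handy for interactive debugging, one of the goals in the PR title. A hypothetical spark-shell snippet, assuming a session where the `student` table from the test above has been registered and this connector is on the classpath:

```scala
import net.snowflake.spark.snowflake.pushdowns.SnowflakeScanExec

val df = spark.sql("SELECT * FROM student")

// inspect whether the scan was pushed down, and print the generated SQL
df.queryExecution.executedPlan match {
  case scan: SnowflakeScanExec => println(s"pushed down: ${scan.snowflakeSQL}")
  case other                   => println(s"not pushed down: ${other.nodeName}")
}
```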
Review question: What is the benefit of building the RDD in doPrepare instead of doExecute?
Reply: doPrepare lets the Spark planner start the initial metadata-collection work asynchronously, whereas doExecute is always a blocking call. By doing this work in doPrepare, tasks such as building the SQL and opening the connection to Snowflake can run on a background thread while the planner works on the other nodes in the plan, which gives some performance gains.
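To make the timing argument concrete, here is a minimal, Spark-free sketch of the same pattern (all names hypothetical): prepare() submits the slow work to a thread pool and returns immediately, so the caller can keep planning; execute() is the only call that blocks on the Future, and a failure captured on the background thread is re-thrown with its original cause.

```scala
import java.util.concurrent.{Callable, Executors, Future}

// Hypothetical stand-in for the node above: prepare() kicks off the slow
// scan-building work asynchronously; execute() blocks only when the result
// is actually needed.
object AsyncPrepareSketch {
  private val service = Executors.newCachedThreadPool()
  @volatile private var pending: Future[Either[Exception, Seq[String]]] = _

  def prepare(): Unit = {
    pending = service.submit(new Callable[Either[Exception, Seq[String]]] {
      override def call(): Either[Exception, Seq[String]] =
        try {
          Thread.sleep(100)                // stands in for buildScanFromSQL
          Right(Seq("row1", "row2"))
        } catch { case e: Exception => Left(e) }
    })
  }

  def execute(): Seq[String] = pending.get() match {
    case Left(e)     => throw e            // surface the original failure
    case Right(rows) => rows
  }

  def main(args: Array[String]): Unit = {
    prepare()                              // returns immediately
    // ... the planner could prepare other nodes here ...
    println(execute())                     // blocks until the work finishes
    service.shutdown()
  }
}
```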