From 273bee9166e7187a8e7188f645bdbb3cedbe6f0c Mon Sep 17 00:00:00 2001
From: Yangyang Gao
Date: Wed, 6 Dec 2023 15:58:17 +0800
Subject: [PATCH] fix issue after merge main

---
 .../io/glutenproject/execution/BasicScanExecTransformer.scala | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala b/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
index bd7185af0e4ca..f0f9b1f7b8ef1 100644
--- a/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
+++ b/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
@@ -26,7 +26,7 @@ import io.glutenproject.substrait.rel.{ReadRelNode, RelBuilder, SplitInfo}
 import io.glutenproject.substrait.rel.LocalFilesNode.ReadFileFormat
 
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.expressions.{And, Attribute, Expression}
+import org.apache.spark.sql.catalyst.expressions.{And, Attribute, AttributeReference, Expression}
 import org.apache.spark.sql.vectorized.ColumnarBatch
 
 import com.google.common.collect.Lists
@@ -34,6 +34,7 @@ import com.google.common.collect.Lists
 import scala.collection.JavaConverters._
 
 trait BasicScanExecTransformer extends LeafTransformSupport with BaseDataSource {
+  import org.apache.spark.sql.catalyst.util._
 
   /** Returns the filters that can be pushed down to native file scan */
   def filterExprs(hasMetadataColFilters: Boolean = true): Seq[Expression]
@@ -60,6 +61,7 @@ trait BasicScanExecTransformer extends LeafTransformSupport with BaseDataSource
     getPartitions.map(
       BackendsApiManager.getIteratorApiInstance
        .genSplitInfo(_, getPartitionSchema, fileFormat, getMetadataColumns.map(_.name)))
+  }
 
   def doExecuteColumnarInternal(): RDD[ColumnarBatch] = {
     val numOutputRows = longMetric("outputRows")
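
The hunk at line 60 appears to restore a closing brace that was dropped when merging main, so the split-info helper is well-formed again. A minimal sketch of how that method reads once the patch is applied, in Scala; the enclosing `getSplitInfos: Seq[SplitInfo]` signature is assumed from surrounding context and is not shown in the diff itself:

  // Sketch only: signature assumed; body lines are the diff's context lines.
  def getSplitInfos: Seq[SplitInfo] = {
    getPartitions.map(
      BackendsApiManager.getIteratorApiInstance
        .genSplitInfo(_, getPartitionSchema, fileFormat, getMetadataColumns.map(_.name)))
  } // the "+  }" added by this patch closes the method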