diff --git a/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala b/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
index bd7185af0e4ca..f0f9b1f7b8ef1 100644
--- a/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
+++ b/gluten-core/src/main/scala/io/glutenproject/execution/BasicScanExecTransformer.scala
@@ -26,7 +26,7 @@ import io.glutenproject.substrait.rel.{ReadRelNode, RelBuilder, SplitInfo}
 import io.glutenproject.substrait.rel.LocalFilesNode.ReadFileFormat
 
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.expressions.{And, Attribute, Expression}
+import org.apache.spark.sql.catalyst.expressions.{And, Attribute, AttributeReference, Expression}
 import org.apache.spark.sql.vectorized.ColumnarBatch
 
 import com.google.common.collect.Lists
@@ -34,6 +34,7 @@ import com.google.common.collect.Lists
 import scala.collection.JavaConverters._
 
 trait BasicScanExecTransformer extends LeafTransformSupport with BaseDataSource {
+  import org.apache.spark.sql.catalyst.util._
 
   /** Returns the filters that can be pushed down to native file scan */
   def filterExprs(hasMetadataColFilters: Boolean = true): Seq[Expression]
@@ -60,6 +61,7 @@ trait BasicScanExecTransformer extends LeafTransformSupport with BaseDataSource
     getPartitions.map(
       BackendsApiManager.getIteratorApiInstance
         .genSplitInfo(_, getPartitionSchema, fileFormat, getMetadataColumns.map(_.name)))
+  }
 
   def doExecuteColumnarInternal(): RDD[ColumnarBatch] = {
     val numOutputRows = longMetric("outputRows")