Support HyperLogLog++
Signed-off-by: Chong Gao <[email protected]>
Chong Gao committed Oct 24, 2024
1 parent ed4c878 commit 5f5f26a
Showing 3 changed files with 186 additions and 0 deletions.
9 changes: 9 additions & 0 deletions integration_tests/src/main/python/arithmetic_ops_test.py
@@ -98,6 +98,15 @@ def _get_overflow_df(spark, data, data_type, expr):
        StructType([StructField('a', data_type)])
    ).selectExpr(expr)

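# Exercises the new GPU APPROX_COUNT_DISTINCT (HyperLogLog++) aggregation via SQL.
# Note: the input is read from a developer-local parquet path.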
def test_hll():
    assert_gpu_and_cpu_are_equal_sql(
        lambda spark: spark.read.parquet("/home/chongg/a"),
        "tab",
        "select c1, APPROX_COUNT_DISTINCT(c2) from tab group by c1",
        {"spark.rapids.sql.agg.forceSinglePassPartialSort": False,
         "spark.rapids.sql.agg.singlePassPartialSortEnabled": False}
    )

@pytest.mark.parametrize('data_gen', _arith_data_gens, ids=idfn)
@disable_ansi_mode
def test_addition(data_gen):

@@ -3918,6 +3918,16 @@ object GpuOverrides extends Logging {
GpuDynamicPruningExpression(child)
}
}),
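// Map Spark's HyperLogLogPlusPlus (approx_count_distinct) onto GpuHyperLogLogPlusPlus.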
expr[HyperLogLogPlusPlus](
"Aggregation approximate count distinct",
ExprChecks.reductionAndGroupByAgg(TypeSig.LONG, TypeSig.LONG,
Seq(ParamCheck("input", TypeSig.all, TypeSig.all))),
(a, conf, p, r) => new UnaryExprMeta[HyperLogLogPlusPlus](a, conf, p, r) {
override def convertToGpu(child: Expression): GpuExpression = {
GpuHyperLogLogPlusPlus(child, a.relativeSD)
}
}
),
SparkShimImpl.ansiCastRule
).collect { case r if r != null => (r.getClassFor.asSubclass(classOf[Expression]), r)}.toMap

@@ -0,0 +1,167 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.rapids.aggregate

import scala.collection.immutable.Seq

import ai.rapids.cudf
import ai.rapids.cudf.{DType, GroupByAggregation, ReductionAggregation}
import com.nvidia.spark.rapids._
import com.nvidia.spark.rapids.Arm.withResourceIfAllowed
import com.nvidia.spark.rapids.RapidsPluginImplicits.ReallyAGpuExpression
import com.nvidia.spark.rapids.jni.HLL
import com.nvidia.spark.rapids.shims.ShimExpression

import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.catalyst.util.HyperLogLogPlusPlusHelper
import org.apache.spark.sql.rapids.{GpuCreateNamedStruct, GpuGetStructField}
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarBatch

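// Update-side cuDF aggregation: builds one HLL++ sketch (a struct of longs) per group,
// covering both the reduction and the group-by code paths.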
case class CudfHLLPP(override val dataType: DataType,
numRegistersPerSketch: Int) extends CudfAggregate {
override lazy val reductionAggregate: cudf.ColumnVector => cudf.Scalar =
(input: cudf.ColumnVector) => input.reduce(
ReductionAggregation.HLL(numRegistersPerSketch), DType.STRUCT)
override lazy val groupByAggregate: GroupByAggregation =
GroupByAggregation.HLL(numRegistersPerSketch)
override val name: String = "CudfHLLPP"
}

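// Merge-side cuDF aggregation: merges partial HLL++ sketches produced by the update step.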
case class CudfMergeHLLPP(override val dataType: DataType,
numRegistersPerSketch: Int)
extends CudfAggregate {
override lazy val reductionAggregate: cudf.ColumnVector => cudf.Scalar =
(input: cudf.ColumnVector) =>
input.reduce(ReductionAggregation.mergeHLL(numRegistersPerSketch), DType.STRUCT)
override lazy val groupByAggregate: GroupByAggregation =
GroupByAggregation.mergeHLL(numRegistersPerSketch)
override val name: String = "CudfMergeHLLPP"
}

/**
* Performs the final evaluation step: estimates the distinct count from HLL++ sketches.
* The aggregation buffer arrives as long columns; the child expression first assembles
* them into a struct-of-longs column, which is then passed to cuDF.
*/
case class GpuHyperLogLogPlusPlusEvaluation(childExpr: Expression,
numRegistersPerSketch: Int)
extends GpuExpression with ShimExpression {
override def dataType: DataType = LongType

override def prettyName: String = "HLLPP_evaluation"

override def nullable: Boolean = false

override def children: scala.Seq[Expression] = Seq(childExpr)

override def columnarEval(batch: ColumnarBatch): GpuColumnVector = {
withResourceIfAllowed(childExpr.columnarEval(batch)) { sketches =>
val distinctValues = HLL.estimateDistinctValueFromSketches(
sketches.getBase, numRegistersPerSketch)
GpuColumnVector.from(distinctValues, LongType)
}
}
}

/**
* GPU version of HyperLogLogPlusPlus (APPROX_COUNT_DISTINCT).
* As in Spark, APPROX_COUNT_DISTINCT returns zero when the input contains only NULLs.
*/
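// Aggregation pipeline, as wired up below:
//   update:   CudfHLLPP builds a struct-of-longs HLL++ sketch from the input values
//   postUpdate / preMerge / postMerge: split the sketch into long columns (Spark's
//             buffer layout) and rebuild the struct before the next cuDF step
//   merge:    CudfMergeHLLPP merges partial sketches
//   evaluate: GpuHyperLogLogPlusPlusEvaluation estimates the distinct count from the sketch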
case class GpuHyperLogLogPlusPlus(childExpr: Expression, relativeSD: Double)
extends GpuAggregateFunction with Serializable {

// Reuse Spark's helper so the buffer layout stays consistent with the CPU implementation.
private val hllppHelper = new HyperLogLogPlusPlusHelper(relativeSD)

// Number of registers per sketch, computed the same way Spark does.
private lazy val numRegistersPerSketch: Int =
1 << Math.ceil(2.0d * Math.log(1.106d / relativeSD) / Math.log(2.0d)).toInt
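// For example, with Spark's default relativeSD = 0.05 this is 1 << 9 = 512 registers,
// which HyperLogLogPlusPlusHelper packs into 52 longs (the long[52] example below).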

// Spark agg buffer type: long array
private lazy val sparkAggBufferAttributes: Seq[AttributeReference] = {
Seq.tabulate(hllppHelper.numWords) { i =>
AttributeReference(s"MS[$i]", LongType)()
}
}

/**
* Spark stores the aggregation buffer in long columns, e.g. long[52].
* Each long packs multiple registers to save memory.
*/
override val aggBufferAttributes: Seq[AttributeReference] = sparkAggBufferAttributes

/**
* Initialize the long array to all zeros.
*/
override lazy val initialValues: Seq[Expression] = Seq.tabulate(hllppHelper.numWords) { _ =>
GpuLiteral(0L, LongType)
}

override lazy val inputProjection: Seq[Expression] = Seq(childExpr)

/**
* cuDF HLLPP sketch type: struct<long, ..., long>
*/
private lazy val cuDFBufferType: DataType = StructType.fromAttributes(aggBufferAttributes)

/**
* cuDF performs the aggregation on a Struct<long, ..., long> column.
*/
override lazy val updateAggregates: Seq[CudfAggregate] =
Seq(CudfHLLPP(cuDFBufferType, numRegistersPerSketch))

/**
* Convert long columns to Struct<long, ..., long> column
*/
private def genStruct: Seq[Expression] = {
val names = Seq.tabulate(hllppHelper.numWords) { i => GpuLiteral(s"MS[$i]", StringType) }
Seq(GpuCreateNamedStruct(names.zip(aggBufferAttributes).flatten { case (a, b) => List(a, b) }))
}

/**
* Convert Struct<long, ..., long> column to long columns
*/
override lazy val postUpdate: Seq[Expression] = Seq.tabulate(hllppHelper.numWords) {
i => GpuGetStructField(postUpdateAttr.head, i)
}

/**
* Convert long columns to a Struct<long, ..., long> column before merging.
*/
override lazy val preMerge: Seq[Expression] = genStruct

override lazy val mergeAggregates: Seq[CudfAggregate] =
Seq(CudfMergeHLLPP(cuDFBufferType, numRegistersPerSketch))

/**
* Convert Struct<long, ..., long> column to long columns
*/
override lazy val postMerge: Seq[Expression] = Seq.tabulate(hllppHelper.numWords) {
i => GpuGetStructField(postMergeAttr.head, i)
}

override lazy val evaluateExpression: Expression =
GpuHyperLogLogPlusPlusEvaluation(genStruct.head, numRegistersPerSketch)

override def dataType: DataType = LongType

override def prettyName: String = "approx_count_distinct"

override def nullable: Boolean = true

override def children: Seq[Expression] = Seq(childExpr)
}
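
For context, a minimal sketch (not part of this commit) of the query shape the change accelerates. Table, column, and path names are illustrative, and it assumes a session with the RAPIDS Accelerator already enabled:

import org.apache.spark.sql.SparkSession

object ApproxCountDistinctExample {
  def main(args: Array[String]): Unit = {
    // Assumes the RAPIDS Accelerator is enabled on this session
    // (e.g. --conf spark.plugins=com.nvidia.spark.SQLPlugin).
    val spark = SparkSession.builder().appName("hllpp-example").getOrCreate()

    // Illustrative input: any table with a grouping key and a value column works.
    spark.read.parquet("/path/to/input").createOrReplaceTempView("tab")

    // approx_count_distinct(expr[, relativeSD]) is planned as HyperLogLogPlusPlus,
    // which GpuOverrides now converts to GpuHyperLogLogPlusPlus.
    spark.sql("SELECT c1, approx_count_distinct(c2, 0.05) FROM tab GROUP BY c1").show()

    spark.stop()
  }
}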
