Skip to content

Commit

Permalink
add unit tests
Browse files Browse the repository at this point in the history
  • Loading branch information
taiyang-li committed Dec 5, 2023
1 parent 862fa93 commit ddf6b62
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 11 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ class GlutenClickHouseHiveTableSuite()
"spark.sql.warehouse.dir",
getClass.getResource("/").getPath + "unit-tests-working-home/spark-warehouse")
.set("spark.hive.exec.dynamic.partition.mode", "nonstrict")
.set("spark.gluten.supported.hive.udfs", "my_add")
.setMaster("local[*]")
}

Expand Down Expand Up @@ -1060,4 +1061,14 @@ class GlutenClickHouseHiveTableSuite()
compareResultsAgainstVanillaSpark(select_sql, compareResult = true, _ => {})
spark.sql("DROP TABLE test_tbl_3548")
}

/** Registers a Hive UDF from a test jar via CREATE FUNCTION ... USING JAR and
  * verifies the query is offloaded to Gluten's ProjectExecTransformer.
  * The upper-case MY_ADD call also exercises Hive's case-insensitive
  * function-name resolution. */
test("test 'hive udf'") {
  val jarPath = "backends-clickhouse/src/test/resources/udfs/hive-test-udfs.jar"
  val jarUrl = s"file://${System.getProperty("user.dir")}/$jarPath"
  // NOTE: the `s` interpolator must be on the SECOND literal — that is where
  // $jarUrl lives. With `s` only on the first literal (as originally written),
  // Spark received the literal text "$jarUrl" instead of the jar's path.
  spark.sql(
    "CREATE FUNCTION my_add as " +
      s"'org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd2' USING JAR '$jarUrl'")
  runQueryAndCompare("select MY_ADD(id, id+1) from range(10)")(
    checkOperatorMatch[ProjectExecTransformer])
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@ class GlutenClickHouseTPCHParquetSuite extends GlutenClickHouseTPCHAbstractSuite
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
.set("spark.gluten.sql.columnar.backend.ch.use.v2", "false")
.set("spark.gluten.supported.scala.udfs", "my_add")
.set("spark.gluten.supported.hive.udfs", "my_add")
}

override protected val createNullableTables = true
Expand Down Expand Up @@ -1319,16 +1318,6 @@ class GlutenClickHouseTPCHParquetSuite extends GlutenClickHouseTPCHAbstractSuite
checkOperatorMatch[ProjectExecTransformer])
}

/** (Currently ignored.) Registers a Hive UDF from a test jar and verifies the
  * query is offloaded to Gluten's ProjectExecTransformer. */
ignore("test 'hive udf'") {
  val jarPath = "backends-clickhouse/src/test/resources/udfs/hive-test-udfs.jar"
  val jarUrl = s"file://${System.getProperty("user.dir")}/$jarPath"
  // NOTE: the `s` interpolator must be on the SECOND literal — that is where
  // $jarUrl lives. With `s` only on the first literal (as originally written),
  // Spark received the literal text "$jarUrl" instead of the jar's path.
  spark.sql(
    "CREATE FUNCTION my_add as " +
      s"'org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd2' USING JAR '$jarUrl'")
  runQueryAndCompare("select my_add(id, id+1) from range(10)")(
    checkOperatorMatch[ProjectExecTransformer])
}

override protected def runTPCHQuery(
queryNum: Int,
tpchQueries: String = tpchQueries,
Expand Down

0 comments on commit ddf6b62

Please sign in to comment.