From c17b0922788fa182e792f8858df149a33ff9343f Mon Sep 17 00:00:00 2001
From: panbingkun
Date: Fri, 17 Nov 2023 21:39:33 -0800
Subject: [PATCH] [SPARK-45967][BUILD] Upgrade jackson to 2.16.0

### What changes were proposed in this pull request?
This PR aims to upgrade FasterXML Jackson from 2.15.2 to 2.16.0.

### Why are the changes needed?
The new version fixes some bugs; release notes as follows:
- 2.16.0: https://github.com/FasterXML/jackson/wiki/Jackson-Release-2.16, e.g.
  [Databind](https://github.com/FasterXML/jackson-databind) [#1770](https://github.com/FasterXML/jackson-databind/issues/1770): Incorrect deserialization for BigDecimal numbers
- 2.15.3: https://github.com/FasterXML/jackson/wiki/Jackson-Release-2.15.3, e.g.
  [Databind](https://github.com/FasterXML/jackson-databind) [#3968](https://github.com/FasterXML/jackson-databind/issues/3968): Records with additional constructors failed to deserialize

The last upgrade occurred 6 months ago: https://github.com/apache/spark/pull/41414

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Pass GA.

### Was this patch authored or co-authored using generative AI tooling?
No.

Closes #43859 from panbingkun/SPARK-45967.

Authored-by: panbingkun
Signed-off-by: Dongjoon Hyun
---
 dev/deps/spark-deps-hadoop-3-hive-2.3         | 16 ++++++++--------
 pom.xml                                       |  4 ++--
 .../apache/spark/sql/JsonFunctionsSuite.scala |  6 ++++--
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 b/dev/deps/spark-deps-hadoop-3-hive-2.3
index cf469f12bcf95..afde3307c622f 100644
--- a/dev/deps/spark-deps-hadoop-3-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3-hive-2.3
@@ -98,15 +98,15 @@ httpcore/4.4.16//httpcore-4.4.16.jar
 ini4j/0.5.4//ini4j-0.5.4.jar
 istack-commons-runtime/3.0.8//istack-commons-runtime-3.0.8.jar
 ivy/2.5.1//ivy-2.5.1.jar
-jackson-annotations/2.15.2//jackson-annotations-2.15.2.jar
+jackson-annotations/2.16.0//jackson-annotations-2.16.0.jar
 jackson-core-asl/1.9.13//jackson-core-asl-1.9.13.jar
-jackson-core/2.15.2//jackson-core-2.15.2.jar
-jackson-databind/2.15.2//jackson-databind-2.15.2.jar
-jackson-dataformat-cbor/2.15.2//jackson-dataformat-cbor-2.15.2.jar
-jackson-dataformat-yaml/2.15.2//jackson-dataformat-yaml-2.15.2.jar
-jackson-datatype-jsr310/2.15.2//jackson-datatype-jsr310-2.15.2.jar
+jackson-core/2.16.0//jackson-core-2.16.0.jar
+jackson-databind/2.16.0//jackson-databind-2.16.0.jar
+jackson-dataformat-cbor/2.16.0//jackson-dataformat-cbor-2.16.0.jar
+jackson-dataformat-yaml/2.16.0//jackson-dataformat-yaml-2.16.0.jar
+jackson-datatype-jsr310/2.16.0//jackson-datatype-jsr310-2.16.0.jar
 jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar
-jackson-module-scala_2.13/2.15.2//jackson-module-scala_2.13-2.15.2.jar
+jackson-module-scala_2.13/2.16.0//jackson-module-scala_2.13-2.16.0.jar
 jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar
 jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar
 jakarta.servlet-api/4.0.3//jakarta.servlet-api-4.0.3.jar
@@ -244,7 +244,7 @@ scala-reflect/2.13.12//scala-reflect-2.13.12.jar
 scala-xml_2.13/2.2.0//scala-xml_2.13-2.2.0.jar
 slf4j-api/2.0.9//slf4j-api-2.0.9.jar
 snakeyaml-engine/2.7//snakeyaml-engine-2.7.jar
-snakeyaml/2.0//snakeyaml-2.0.jar
+snakeyaml/2.2//snakeyaml-2.2.jar
 snappy-java/1.1.10.5//snappy-java-1.1.10.5.jar
 spire-macros_2.13/0.18.0//spire-macros_2.13-0.18.0.jar
 spire-platform_2.13/0.18.0//spire-platform_2.13-0.18.0.jar
diff --git a/pom.xml b/pom.xml
index f8363a66abdb4..7fea143bb4d2b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -185,8 +185,8 @@
     true
     true
     <codehaus.jackson.version>1.9.13</codehaus.jackson.version>
-    <fasterxml.jackson.version>2.15.2</fasterxml.jackson.version>
-    <fasterxml.jackson.databind.version>2.15.2</fasterxml.jackson.databind.version>
+    <fasterxml.jackson.version>2.16.0</fasterxml.jackson.version>
+    <fasterxml.jackson.databind.version>2.16.0</fasterxml.jackson.databind.version>
     2.3.0
     3.0.2
     <snappy.version>1.1.10.5</snappy.version>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
index 933f362db663f..87593afb332df 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
@@ -1175,7 +1175,8 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
     val invalidDataType = "MAP<INT, cow>"
     val invalidDataTypeReason = "Unrecognized token 'MAP': " +
       "was expecting (JSON String, Number, Array, Object or token 'null', 'true' or 'false')\n " +
-      "at [Source: (String)\"MAP<INT, cow>\"; line: 1, column: 4]"
+      "at [Source: REDACTED (`StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` disabled); " +
+      "line: 1, column: 4]"
     checkError(
       exception = intercept[AnalysisException] {
         df.select(from_json($"json", invalidDataType, Map.empty[String, String])).collect()
@@ -1190,7 +1191,8 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
     val invalidTableSchema = "x INT, a cow"
     val invalidTableSchemaReason = "Unrecognized token 'x': " +
       "was expecting (JSON String, Number, Array, Object or token 'null', 'true' or 'false')\n" +
-      " at [Source: (String)\"x INT, a cow\"; line: 1, column: 2]"
+      " at [Source: REDACTED (`StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` disabled); " +
+      "line: 1, column: 2]"
     checkError(
       exception = intercept[AnalysisException] {
         df.select(from_json($"json", invalidTableSchema, Map.empty[String, String])).collect()
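
Note on the `JsonFunctionsSuite` changes above: Jackson 2.16.0 turns `StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` off by default, so parse-error locations now print `REDACTED` instead of echoing the offending input, which is why the expected messages in these tests change. The sketch below is only an illustration of that Jackson behavior on a plain `ObjectMapper`, not part of the patch and not Spark's internal JSON path; the object name and the standalone setup are assumptions.

```scala
import com.fasterxml.jackson.core.{JsonFactory, StreamReadFeature}
import com.fasterxml.jackson.databind.ObjectMapper

// Illustrative only: shows how an application could restore pre-2.16 style
// error locations by re-enabling INCLUDE_SOURCE_IN_LOCATION.
object IncludeSourceInLocationExample {
  def main(args: Array[String]): Unit = {
    val factory = JsonFactory.builder()
      .enable(StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION) // disabled by default since 2.16
      .build()
    val mapper = new ObjectMapper(factory)

    try {
      mapper.readTree("MAP") // not valid JSON, triggers "Unrecognized token 'MAP'"
    } catch {
      case e: Exception =>
        // With the feature enabled, the location echoes the source again,
        // e.g. [Source: (String)"MAP"; ...]; with the 2.16 default it reads
        // [Source: REDACTED (`StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` disabled); ...].
        println(e.getMessage)
    }
  }
}
```

Spark keeps the new default here and only updates the expected test messages, which avoids leaking raw input into error strings.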