scala.ScalaReflectionException:JavaMirror(带 ClasspathFilter)中的类 java.sql.Timestamp

Ash*_*tal 6 java unit-testing scala apache-spark

我有一个单元测试,它基本上使用一个案例类(case class)将数据帧(DataFrame)转换为数据集(Dataset)。

当我使用 JDK 8 运行该测试时,它通过了;但当我使用 JDK 10 或 11 时,它失败了:“scala.ScalaReflectionException: class java.sql.Timestamp in JavaMirror with ClasspathFilter”。除此之外,我没有从失败中获得任何其他信息。我的单元测试如下:

  "A dataFrameToDataSet function" should "return DataSet[AssetTagAsset] from dataframe that contains Asset data" in {

    // Expected records, keyed by primary key so each collected row can be
    // looked up and compared field by field after the conversion.
    val assetTagAssets = Map[Long, AssetTagAsset](
      (
        1L,
        new AssetTagAsset(
          asset_tag_asset_id = 1L,
          asset_tag_asset_uuid = "test",
          asset_id = 1L,
          asset_tag_id = 1L,
          sticky = "abc",
          added_date = Option(Timestamp.valueOf("2018-04-17 10:10:50")),
          added_user_id = Option(1L),
          region = "sjc01",
          environment = "eng",
          pod = "p05"
        )
      )
    )

    // NOTE(review): presumably mirrors how Oracle NUMBER columns surface in
    // Spark as DECIMAL(38, 10) — confirm against the real source schema.
    val DecimalType = DataTypes.createDecimalType(38, 10)

    // Schema of the input DataFrame; only the optional columns are nullable.
    val assetTagAssetSchema = List(
      StructField("ASSET_TAG_ASSET_ID", DecimalType, false),
      StructField("ASSET_TAG_ASSET_UUID", StringType, false),
      StructField("ASSET_ID", DecimalType, false),
      StructField("ASSET_TAG_ID", DecimalType, false),
      StructField("STICKY", StringType, false),
      StructField("ADDED_DATE", TimestampType, true),
      StructField("ADDED_USER_ID", DecimalType, true),
      StructField("REGION", StringType, false),
      StructField("ENVIRONMENT", StringType, false),
      StructField("POD", StringType, false)
    )

    // Build Rows whose runtime values match the schema: java.math.BigDecimal
    // for the DECIMAL columns, null for absent optional values.
    val assetTagAssetRows = assetTagAssets.values
      .map(
        assetTagAsset =>
          Row(
            java.math.BigDecimal.valueOf(assetTagAsset.asset_tag_asset_id),
            assetTagAsset.asset_tag_asset_uuid,
            java.math.BigDecimal.valueOf(assetTagAsset.asset_id),
            java.math.BigDecimal.valueOf(assetTagAsset.asset_tag_id),
            assetTagAsset.sticky,
            assetTagAsset.added_date.orNull,
            toBigDecimal(assetTagAsset.added_user_id),
            assetTagAsset.region,
            assetTagAsset.environment,
            assetTagAsset.pod
        )
      )
      .toSeq

    val assetTagAssetDF = spark.createDataFrame(
      spark.sparkContext.parallelize(assetTagAssetRows),
      StructType(assetTagAssetSchema)
    )

    // Function under test: converts the untyped DataFrame into a typed Dataset.
    val ds = dataFrameToDataSet[AssetTagAsset]("oracle_grc_asset_tag_asset", assetTagAssetDF)
    assert(ds.count() === assetTagAssets.size)

    // Every converted record must match the expected record field by field.
    ds.collect.foreach(assetTagAsset => {
      val expectedAssetTagAsset = assetTagAssets(assetTagAsset.asset_tag_asset_id)
      assert(assetTagAsset.asset_tag_asset_uuid === expectedAssetTagAsset.asset_tag_asset_uuid)
      assert(assetTagAsset.asset_id === expectedAssetTagAsset.asset_id)
      assert(assetTagAsset.asset_tag_id === expectedAssetTagAsset.asset_tag_id)
      assert(assetTagAsset.sticky === expectedAssetTagAsset.sticky)
      assert(assetTagAsset.added_date === expectedAssetTagAsset.added_date)
      assert(assetTagAsset.added_user_id === expectedAssetTagAsset.added_user_id)
      assert(assetTagAsset.region === expectedAssetTagAsset.region)
      assert(assetTagAsset.environment === expectedAssetTagAsset.environment)
      assert(assetTagAsset.pod === expectedAssetTagAsset.pod)
    })
  }

AssetTagAsset 是一个案例类,如下所示:

/**
 * Immutable record for one row of the oracle_grc_asset_tag_asset table.
 * Nullable columns (ADDED_DATE, ADDED_USER_ID) are modelled as Option.
 */
case class AssetTagAsset(
    asset_tag_asset_id: Long,
    asset_tag_asset_uuid: String,
    asset_id: Long,
    asset_tag_id: Long,
    sticky: String,
    added_date: Option[java.sql.Timestamp],
    added_user_id: Option[Long],
    region: String,
    environment: String,
    pod: String
)

object AssetTagAsset {

  /**
   * Builds an [[AssetTagAsset]] from a Spark [[Row]].
   *
   * DECIMAL columns are read as java.math.BigDecimal and narrowed to Long;
   * optional columns go through getOptionValue, which presumably wraps
   * nullable values in Option — TODO confirm against its definition.
   */
  def apply(row: Row): AssetTagAsset = {
    new AssetTagAsset(
      asset_tag_asset_id = row.getAs[java.math.BigDecimal]("ASSET_TAG_ASSET_ID").longValue,
      asset_tag_asset_uuid = row.getAs[String]("ASSET_TAG_ASSET_UUID"),
      asset_id = row.getAs[java.math.BigDecimal]("ASSET_ID").longValue,
      asset_tag_id = row.getAs[java.math.BigDecimal]("ASSET_TAG_ID").longValue,
      sticky = row.getAs[String]("STICKY"),
      added_date = getOptionValue(row, "ADDED_DATE"),
      added_user_id = getOptionValue(row, "ADDED_USER_ID"),
      region = row.getAs[String]("REGION"),
      environment = row.getAs[String]("ENVIRONMENT"),
      pod = row.getAs[String]("POD")
    )
  }
}