Are there any pre-built anomaly detection algorithms or interquartile range (IQR) identification methods in Spark 2.0.0? I found some code here, but I don't think it is available in Spark 2.0.0.
Thanks.
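MLlib in Spark 2.0.0 does not ship a dedicated outlier-detection estimator, but an IQR check can be assembled from DataFrameStatFunctions.approxQuantile, which was added in 2.0.0. A minimal sketch, assuming a DataFrame df with a numeric column named "value" (both names are placeholders):

import org.apache.spark.sql.functions.col

// Approximate the 25th and 75th percentiles (relative error 0.01).
val Array(q1, q3) = df.stat.approxQuantile("value", Array(0.25, 0.75), 0.01)
val iqr = q3 - q1

// Rows outside the conventional 1.5 * IQR fences are flagged as outliers.
val outliers = df.filter(col("value") < q1 - 1.5 * iqr || col("value") > q3 + 1.5 * iqr)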
Can someone help me with the following error? I am trying to convert a DataFrame to an RDD so that it can be used for building a regression model.
Spark version: 2.0.0
Error => ClassCastException: org.apache.spark.ml.linalg.DenseVector cannot be cast to org.apache.spark.mllib.linalg.Vector
Code =>
import org.apache.spark.ml.feature.{Binarizer, VectorAssembler}
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.Row

// Binarize the repayment amount into a 0/1 "label" column.
val binarizer2: Binarizer = new Binarizer()
  .setInputCol("repay_amt").setOutputCol("label").setThreshold(20.00)
df = binarizer2.transform(df)

// Assemble the predictor columns into a single "features" vector column.
val assembler = new VectorAssembler()
  .setInputCols(Array("tot_txns", "avg_unpaiddue", "max_unpaiddue", "sale_txn", "max_amt", "tot_sale_amt"))
  .setOutputCol("features")
df = assembler.transform(df)

df.write.mode(SaveMode.Overwrite).parquet("lazpay_final_data.parquet")
val df2 = spark.read.parquet("lazpay_final_data.parquet/")

// The ClassCastException is thrown here: "features" holds an ml.linalg vector,
// not the mllib.linalg.Vector that LabeledPoint expects.
val df3 = df2.rdd.map(r => LabeledPoint(r.getDouble(0), r.getAs("features")))
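The exception comes from mixing the two vector namespaces: VectorAssembler (spark.ml) produces org.apache.spark.ml.linalg.Vector, while LabeledPoint (spark.mllib) expects org.apache.spark.mllib.linalg.Vector. Since Spark 2.0 the mllib Vectors object offers fromML to bridge them; a sketch of the last line using that conversion, keeping the label in column 0 as in the code above:

import org.apache.spark.ml.{linalg => newlinalg}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint

val df3 = df2.rdd.map { r =>
  // Read the ml vector from the row, then convert it to the mllib type.
  val features = Vectors.fromML(r.getAs[newlinalg.Vector]("features"))
  LabeledPoint(r.getDouble(0), features)
}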
Data =>
I am using RStudio.
After creating a session, if I try to create a DataFrame from R data, I get an error.
# Point SparkR at the local Spark build and winutils installation.
Sys.setenv(SPARK_HOME = "E:/spark-2.0.0-bin-hadoop2.7/spark-2.0.0-bin-hadoop2.7")
Sys.setenv(HADOOP_HOME = "E:/winutils")
.libPaths(c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib"), .libPaths()))
Sys.setenv('SPARKR_SUBMIT_ARGS' = '"sparkr-shell"')
library(SparkR)
sparkR.session(sparkConfig = list(spark.sql.warehouse.dir = "C:/Temp"))

# The error occurs when converting the local data.frame to a Spark DataFrame.
localDF <- data.frame(name = c("John", "Smith", "Sarah"), age = c(19, 23, 18))
df <- createDataFrame(localDF)
Error:
Error in invokeJava(isStatic = TRUE, className, methodName, ...) :
java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(Unknown Source)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(Unknown Source)
at java.lang.reflect.Constructor.newInstance(Unknown Source)
at org.apache.spark.sql.hive.client.IsolatedClientLoader.createClient(IsolatedClientLoader.scala:258)
at org.apache.spark.sql.hive.HiveUtils$.newClientForMetadata(HiveUtils.scala:359)
at org.apache.spark.sql.hive.HiveUtils$.newClientForMetadata(HiveUtils.scala:263)
at org.apache.spark.sql.hive.HiveSharedState.metadataHive$lzycompute(HiveSharedState.scala:39)
at org.apache.spark.sql.hive.HiveSharedState.metadataHive(HiveSharedState.scala:38)
at org.apache.spark.sql.hive.HiveSharedState.externalCatalog$lzycompute(HiveSharedState.scala:46)
at org.apache.spark.sql.hive.HiveSharedState.externalCatalog(HiveSharedState.scala:45)
at org.a
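The trace fails inside IsolatedClientLoader.createClient, i.e. while Spark initializes its Hive metastore client. On Windows this is often reported when the Hive scratch directory is not writable; a commonly suggested workaround (the winutils path is an assumption based on the HADOOP_HOME set above, and the fix itself is unverified here) is to grant permissions before starting the session:

# Assumed fix: make Spark's Hive scratch directory C:\tmp\hive writable.
system("E:/winutils/bin/winutils.exe chmod 777 C:\\tmp\\hive")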
TIA.