Posts by Nag*_*tal

Hadoop copyFromLocal out-of-memory issue

I am trying to copy a directory containing 1,048,578 files into the HDFS file system, but I get the following error:

Exception in thread "main" java.lang.OutOfMemoryError: Java heap space
    at java.util.Arrays.copyOf(Arrays.java:2367)
    at java.lang.AbstractStringBuilder.expandCapacity(AbstractStringBuilder.java:130)
    at java.lang.AbstractStringBuilder.ensureCapacityInternal(AbstractStringBuilder.java:114)
    at java.lang.AbstractStringBuilder.append(AbstractStringBuilder.java:415)
    at java.lang.StringBuffer.append(StringBuffer.java:237)
    at java.net.URI.appendSchemeSpecificPart(URI.java:1892)
    at java.net.URI.toString(URI.java:1922)
    at java.net.URI.<init>(URI.java:749)
    at org.apache.hadoop.fs.shell.PathData.stringToUri(PathData.java:565)
    at org.apache.hadoop.fs.shell.PathData.<init>(PathData.java:151)
    at org.apache.hadoop.fs.shell.PathData.getDirectoryContents(PathData.java:273)
    at org.apache.hadoop.fs.shell.Command.recursePath(Command.java:347)
    at org.apache.hadoop.fs.shell.CommandWithDestination.recursePath(CommandWithDestination.java:291)
    at org.apache.hadoop.fs.shell.Command.processPaths(Command.java:308)
    at org.apache.hadoop.fs.shell.Command.processPathArgument(Command.java:278)
    at org.apache.hadoop.fs.shell.CommandWithDestination.processPathArgument(CommandWithDestination.java:243)
    at org.apache.hadoop.fs.shell.Command.processArgument(Command.java:260)
    at org.apache.hadoop.fs.shell.Command.processArguments(Command.java:244)
    at org.apache.hadoop.fs.shell.CommandWithDestination.processArguments(CommandWithDestination.java:220)
    at org.apache.hadoop.fs.shell.CopyCommands$Put.processArguments(CopyCommands.java:267)
    at org.apache.hadoop.fs.shell.Command.processRawArguments(Command.java:190)
    at org.apache.hadoop.fs.shell.Command.run(Command.java:154)
    at org.apache.hadoop.fs.FsShell.run(FsShell.java:287)
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
    at org.apache.hadoop.fs.FsShell.main(FsShell.java:340)
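
The trace shows the FsShell client JVM itself exhausting its heap while recursively enumerating the source directory, so a common first step is to raise the client-side heap via HADOOP_CLIENT_OPTS before retrying. A minimal sketch, assuming the heap size (4g) and the paths are placeholders you would adjust:

    # Raise the heap available to the 'hadoop fs' client JVM (4g is illustrative)
    export HADOOP_CLIENT_OPTS="-Xmx4g"

    # Retry the copy; the recursive listing of ~1M paths now has more headroom
    # (/path/to/local/dir and /path/in/hdfs are hypothetical placeholders)
    hadoop fs -copyFromLocal /path/to/local/dir /path/in/hdfs

If the directory keeps growing, splitting the upload into smaller batches avoids building the entire million-entry listing in one client process.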

java hadoop copy heap-memory out-of-memory

1 vote · 1 answer · 2357 views

Spark Elasticsearch throws 403 Forbidden error

When I try to connect to Elasticsearch from Spark using basic authentication to create a new index, I get the following error.

The error returned by Elasticsearch does not provide enough detail to debug it further:

 org.elasticsearch.hadoop.rest.EsHadoopInvalidRequest: [HEAD] on [devl_test_index] failed; server[https://<elasticServerHost>:9200] returned [403|Forbidden:]
            at org.elasticsearch.hadoop.rest.RestClient.checkResponse(RestClient.java:477)
            at org.elasticsearch.hadoop.rest.RestClient.executeNotFoundAllowed(RestClient.java:447)
            at org.elasticsearch.hadoop.rest.RestClient.exists(RestClient.java:539)
            at org.elasticsearch.hadoop.rest.RestClient.indexExists(RestClient.java:534)
            at org.elasticsearch.hadoop.rest.RestClient.touch(RestClient.java:545)
            at org.elasticsearch.hadoop.rest.RestRepository.touch(RestRepository.java:364)
            at org.elasticsearch.hadoop.rest.RestService.initSingleIndex(RestService.java:660)
            at org.elasticsearch.hadoop.rest.RestService.createWriter(RestService.java:636)
            at org.elasticsearch.spark.rdd.EsRDDWriter.write(EsRDDWriter.scala:65)
            at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
            at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
            at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
            at org.apache.spark.scheduler.Task.run(Task.scala:109)
            at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
            at java.lang.Thread.run(Thread.java:745)

Code used for the connection:

Sbt dependency: "org.elasticsearch" % "elasticsearch-hadoop" % "7.5.0"

import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql._

val spark = SparkSession.builder().appName("SparkJDBC")
  .enableHiveSupport()
  // elasticsearch-hadoop settings, passed through Spark with the "spark." prefix
  .config("spark.es.port", "9200")
  .config("spark.es.nodes", "<elasticServerHost>")
  .config("spark.es.nodes.wan.only", "true")
  .config("spark.es.net.ssl", "true")
  .config("spark.es.net.http.auth.user", "USERNAME")
  .config("spark.es.net.http.auth.pass", "PASSWRD")
  .master("local[*]")
  .getOrCreate()

val df = spark.sql("select * from employee")

// Write the DataFrame to the target index; this is the call that fails with 403
df.saveToEs("devl_test_index")
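
Since the failure is a HEAD request against the index, one way to narrow it down is to replay that request outside Spark with the same credentials; a 403 there confirms the user lacks index privileges (for example create_index/manage on devl_test_index) rather than a Spark-side misconfiguration. A hedged sketch, reusing the placeholders from the question:

    # Replay the failing HEAD request with the same basic-auth credentials
    curl -u USERNAME:PASSWRD -I "https://<elasticServerHost>:9200/devl_test_index"

    # If HEAD is forbidden but the cluster is reachable, inspect the user's roles
    curl -u USERNAME:PASSWRD "https://<elasticServerHost>:9200/_security/user/USERNAME"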

scala elasticsearch apache-spark

1 vote · 1 answer · 719 views