I have the following Spark code:
    import org.apache.hadoop.hbase.client._
    import org.apache.hadoop.hbase.{ HBaseConfiguration, HTableDescriptor }
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable
    import org.apache.hadoop.hbase.util.Bytes
    import kafka.serializer.StringDecoder
    import org.apache.spark._
    import org.apache.spark.SparkContext._
    import org.apache.spark.streaming._
    import org.apache.spark.streaming.kafka._
    object Hbase {
        def main(args: Array[String]) {
            val sparkConf = new SparkConf().setAppName("Spark-Hbase").setMaster("local[2]")
            val sc = new SparkContext(sparkConf)
            ...
            val ssc = new StreamingContext(sparkConf, Seconds(3))
            val kafkaBrokers = Map("metadata.broker.list" -> "localhost:9092")
            val topics = List("test").toSet
            val lines = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaBrokers, topics)
        }
    }
The error I am getting is:
Only one SparkContext may be running in this JVM (see SPARK-2243). To ignore this error, set spark.driver.allowMultipleContexts = true.
What is wrong with the code above? I don't see where I am creating a second context...
These are the two SparkContexts you are creating, which is not allowed:
    val sc = new SparkContext(sparkConf)
    val ssc = new StreamingContext(sparkConf, Seconds(3))
Passing a SparkConf to the StreamingContext constructor creates a second SparkContext internally. You should instead create the StreamingContext from the original SparkContext:
    val ssc = new StreamingContext(sc, Seconds(3))
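Putting it together, a minimal sketch of the corrected main method might look like this. It keeps the broker, topic, and batch interval from the question; the ssc.start() / ssc.awaitTermination() calls are added here only so the sketch actually runs a streaming job and are not part of the original snippet:

    // Sketch of the corrected program: only one SparkContext is created,
    // and the StreamingContext is built from it.
    object Hbase {
      def main(args: Array[String]) {
        val sparkConf = new SparkConf().setAppName("Spark-Hbase").setMaster("local[2]")
        val sc = new SparkContext(sparkConf)

        // Reuse the existing SparkContext instead of passing sparkConf again
        val ssc = new StreamingContext(sc, Seconds(3))

        val kafkaBrokers = Map("metadata.broker.list" -> "localhost:9092")
        val topics = List("test").toSet
        val lines = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaBrokers, topics)

        // Start the streaming computation and block until it is stopped
        ssc.start()
        ssc.awaitTermination()
      }
    }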