How to create a permanent table in Spark SQL

tra*_*loo 3 java apache-spark apache-spark-sql

In my project, I am moving data from MongoDB into Spark SQL tables to run SQL-based queries. But Spark SQL only lets me create temporary tables. Every time I want to query something, the execution time is very long, because the data transfer and the mapping operations take too much time.

So, can I reduce the execution time? Can I create permanent Spark SQL tables? And can I query those permanent tables over JDBC?

I am adding my code and the execution-time results below. I am running everything in standalone mode.

package com.mongodb.spark.sql;

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import org.bson.BSONObject;

import com.mongodb.hadoop.MongoInputFormat;
import com.mongodb.spark.demo.Observation;
import com.mongodb.spark.demo.Sensor;

import scala.Tuple2;

public class SparkSqlMongo {

public static void main(String[] args) {

    // Hadoop configuration for reading the "observations" collection
    // through the Mongo Hadoop connector.
    Configuration conf = new Configuration();

    conf.set("mongo.job.input.format", "com.mongodb.hadoop.MongoInputFormat");
    conf.set("mongo.input.uri", "mongodb://localhost:27017/test.observations");

    Configuration sensConf = new Configuration();

    sensConf.set("mongo.job.input.format", "com.mongodb.hadoop.MongoInputFormat");
    sensConf.set("mongo.input.uri", "mongodb://localhost:27017/test.sens");

    SparkConf sconf = new SparkConf().setMaster("local[2]").setAppName("SQL DENEME").set("nsmc.connection.host",
            "mongodb:");

    JavaSparkContext sc = new JavaSparkContext(sconf);
    SQLContext sql = new SQLContext(sc);

    // Read observations as (ObjectId, BSONObject) pairs and map each
    // BSON document onto an Observation bean.
    JavaRDD<Observation> obs = sc.newAPIHadoopRDD(conf, MongoInputFormat.class, Object.class, BSONObject.class)
            .map(new Function<Tuple2<Object, BSONObject>, Observation>() {

                private static final long serialVersionUID = 1L;

                @Override
                public Observation call(Tuple2<Object, BSONObject> v1) throws Exception {

                    int id = (int) v1._2.get("_id");
                    double value = (double) v1._2.get("Value");
                    // Date time = (Date) v1._2.get("Time");
                    int sensor = (int) v1._2.get("SensorId");
                    int stream = (int) v1._2.get("DataStreamId");

                    Observation obs = new Observation(id, value, sensor, stream);
                    return obs;

                }
            });

    DataFrame obsi = sql.createDataFrame(obs, Observation.class);

    // Temp tables live only inside this SQLContext / application.
    obsi.registerTempTable("obsi");

    // Same pattern for the sensors collection.
    JavaRDD<Sensor> sens = sc.newAPIHadoopRDD(sensConf, MongoInputFormat.class, Object.class, BSONObject.class)
            .map(new Function<Tuple2<Object, BSONObject>, Sensor>() {

                private static final long serialVersionUID = 1L;

                @Override
                public Sensor call(Tuple2<Object, BSONObject> v1) throws Exception {

                    int id = (int) v1._2.get("_id");
                    String name = (String) v1._2.get("Name");
                    String description = (String) v1._2.get("Description");

                    Sensor s = new Sensor(id, name, description);

                    System.out.println(s.getName());
                    return s;

                }
            });

    DataFrame sensi = sql.createDataFrame(sens, Sensor.class);

    sensi.registerTempTable("sensi");

    // show() is an action: it forces the sensor collection to be read here.
    sensi.show();

    long start = System.currentTimeMillis();

    // NOTE: sql(...) and cache() are both lazy, so this timed span only
    // covers query planning; the Mongo read, mapping and join actually
    // run when an action (such as collect() below) executes.
    DataFrame obser = sql
            .sql("SELECT obsi.value, obsi.id, sensi.name FROM obsi, sensi WHERE obsi.sensorID = sensi.id  and sensi.id = 107")
            .cache();
    long stop = System.currentTimeMillis();

    System.out.println("total query time : " + (stop - start));

    // collect() is an action: it triggers the actual scan, mapping and join.
    List<String> names = obser.javaRDD().map(new Function<Row, String>() {

        private static final long serialVersionUID = 1L;

        public String call(Row row) {
            return "Name: " + row;
        }
    }).collect();

}

}

With about 5M observation documents and 1K sensor documents, the whole run takes about 120 seconds. I join these tables, and the execution time is very long and unacceptable.

Kau*_*hal 5

  1. Yes, you can shorten the program's execution time by caching your table, DataFrame, or RDD (first sketch below).
  2. And if you want to save the data as a permanent table, you can use the df.saveAsTable method, but the DataFrame should be created through a HiveContext (second sketch below).
  3. For JDBC connectivity, you need to start the Thrift JDBC/ODBC server; Spark SQL queries can then be executed over JDBC against the registered tables (third sketch below).
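
A minimal sketch of point 1, written as a drop-in change to the question's main method (it assumes the sql SQLContext and the obs RDD from the question): caching materializes the mapped data in memory once, so every later query skips the MongoDB transfer and the BSON-to-bean mapping.

    DataFrame obsi = sql.createDataFrame(obs, Observation.class);

    // cache() is lazy: it only marks the DataFrame for in-memory storage.
    obsi.cache();
    obsi.registerTempTable("obsi");

    // Force materialization once, up front, with any action.
    obsi.count();

    // All following queries read from the in-memory columnar cache
    // instead of going back to MongoDB.
    DataFrame cheap = sql.sql("SELECT id, value FROM obsi WHERE id = 107");
    cheap.show();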
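
For point 2, a sketch assuming Spark 1.x (matching the DataFrame API in the question) built with Hive support; the table name "observations" is made up for illustration. saveAsTable writes both the data files and the metastore entry, so the table outlives the application:

    import org.apache.spark.sql.hive.HiveContext;   // additional import

    // HiveContext extends SQLContext and is backed by a Hive metastore,
    // which is what makes the table permanent.
    HiveContext hive = new HiveContext(sc.sc());   // sc is the JavaSparkContext

    DataFrame obsi = hive.createDataFrame(obs, Observation.class);

    // Unlike registerTempTable, this persists data plus metadata;
    // on Spark 1.4+ the equivalent is obsi.write().saveAsTable(...).
    obsi.saveAsTable("observations");

    // Any later application using the same metastore can read it back:
    DataFrame back = hive.table("observations");
    back.show();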
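
For point 3, the usual route is to start the Thrift JDBC/ODBC server that ships with Spark (./sbin/start-thriftserver.sh, which speaks the HiveServer2 protocol and listens on port 10000 by default) against the same metastore, and then query the saved table from any JDBC client via the Hive JDBC driver. A sketch of such a client follows; the host, port, and table name are assumptions carried over from the previous snippet:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ThriftJdbcClient {

        public static void main(String[] args) throws Exception {

            // Spark's Thrift server implements the HiveServer2 protocol,
            // so the standard Hive JDBC driver is used.
            Class.forName("org.apache.hive.jdbc.HiveDriver");

            try (Connection conn = DriverManager.getConnection(
                    "jdbc:hive2://localhost:10000/default", "", "");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT id, value FROM observations WHERE id = 107")) {

                while (rs.next()) {
                    System.out.println(rs.getInt("id") + " -> " + rs.getDouble("value"));
                }
            }
        }
    }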