

Java KryoSerializer Class Code Examples

This article collects typical usage examples of the Java class org.apache.spark.serializer.KryoSerializer. If you are wondering how the KryoSerializer class is actually used, how to configure it, or what real-world examples look like, the curated code samples below may help.


The KryoSerializer class belongs to the org.apache.spark.serializer package. The sections below show 13 code examples of the class, sorted by popularity by default.
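Most of the examples below follow the same basic pattern: register KryoSerializer as Spark's serializer on a SparkConf before creating the context. As a quick orientation, here is a minimal, self-contained sketch of that pattern; the application name and master URL are placeholders, not taken from any project below:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.serializer.KryoSerializer;

public class KryoQuickStart {
  public static void main(String[] args) {
    // Use Kryo instead of Java serialization for shuffled and cached data.
    SparkConf conf = new SparkConf()
        .setAppName("KryoQuickStart")      // placeholder app name
        .setMaster("local[2]")             // placeholder master URL for local testing
        .set("spark.serializer", KryoSerializer.class.getName());

    JavaSparkContext sc = new JavaSparkContext(conf);
    // ... build and process RDDs here ...
    sc.stop();
  }
}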

Example 1: SparkDriver

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
public SparkDriver(Properties props) {
  SparkConf conf = new SparkConf().setAppName(props.getProperty(MudrodConstants.SPARK_APP_NAME, "MudrodSparkApp")).setIfMissing("spark.master", props.getProperty(MudrodConstants.SPARK_MASTER))
      .set("spark.hadoop.validateOutputSpecs", "false").set("spark.files.overwrite", "true");

  String esHost = props.getProperty(MudrodConstants.ES_UNICAST_HOSTS);
  String esPort = props.getProperty(MudrodConstants.ES_HTTP_PORT);

  if (!"".equals(esHost)) {
    conf.set("es.nodes", esHost);
  }

  if (!"".equals(esPort)) {
    conf.set("es.port", esPort);
  }

  conf.set("spark.serializer", KryoSerializer.class.getName());
  conf.set("es.batch.size.entries", "1500");

  sc = new JavaSparkContext(conf);
  sqlContext = new SQLContext(sc);
}
 
Developer ID: apache, Project: incubator-sdap-mudrod, Lines of code: 22, Source file: SparkDriver.java

Example 2: createSparkContext

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
/**
 * Helper method for creating the spark context from the given cognition configuration
 * @return a new configured spark context
 */
public SparkContext createSparkContext() {
  SparkConf conf = new SparkConf();

  Configuration config = cognition.getProperties();

  conf.set("spark.serializer", KryoSerializer.class.getName());
  conf.setAppName(config.getString("app.name"));
  conf.setMaster(config.getString("master"));

  Iterator<String> iterator = config.getKeys("spark");
  while (iterator.hasNext()) {
    String key = iterator.next();
    conf.set(key, config.getString(key));
  }

  SparkContext sc = new SparkContext(conf);
  for (String jar : config.getStringArray("jars")) {
    sc.addJar(jar);
  }

  return sc;
}
 
Developer ID: boozallen, Project: cognition, Lines of code: 27, Source file: LensAPI.java

Example 3: initialize

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
private LinkedBlockingQueue<Kryo> initialize(final Configuration configuration) {
    // DCL is safe in this case due to volatility
    if (!INITIALIZED) {
        synchronized (UnshadedKryoShimService.class) {
            if (!INITIALIZED) {
                // so we don't get a WARN that a new configuration is being created within an active context
                final SparkConf sparkConf = null == Spark.getContext() ? new SparkConf() : Spark.getContext().getConf().clone();
                configuration.getKeys().forEachRemaining(key -> sparkConf.set(key, configuration.getProperty(key).toString()));
                final KryoSerializer serializer = new KryoSerializer(sparkConf);
                // Setup a pool backed by our spark.serializer instance
                // Reuse Gryo poolsize for Kryo poolsize (no need to copy this to SparkConf)
                KRYOS.clear();
                final int poolSize = configuration.getInt(GryoPool.CONFIG_IO_GRYO_POOL_SIZE, GryoPool.CONFIG_IO_GRYO_POOL_SIZE_DEFAULT);
                for (int i = 0; i < poolSize; i++) {
                    KRYOS.add(serializer.newKryo());
                }
                INITIALIZED = true;
            }
        }
    }

    return KRYOS;
}
 
Developer ID: apache, Project: tinkerpop, Lines of code: 24, Source file: UnshadedKryoShimService.java

Example 4: main

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
/**
 * The main method.
 *
 * @param args the arguments
 */
public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("EarthdataMapperRunnableExample").setMaster("local[2]");
    conf.set("spark.serializer", KryoSerializer.class.getName());
    conf.set("spark.kryo.registrator", GeoSparkKryoRegistrator.class.getName());
    sc = new JavaSparkContext(conf);
    Logger.getLogger("org").setLevel(Level.WARN);
    Logger.getLogger("akka").setLevel(Level.WARN);
    InputLocation = System.getProperty("user.dir") + "/src/test/resources/modis/modis.csv";
    splitter = FileDataSplitter.CSV;
    indexType = IndexType.RTREE;
    queryEnvelope = new Envelope(-90.01, -80.01, 30.01, 40.01);
    numPartitions = 5;
    loopTimes = 1;
    HDFIncrement = 5;
    HDFOffset = 2;
    HDFRootGroupName = "MOD_Swath_LST";
    HDFDataVariableName = "LST";
    urlPrefix = System.getProperty("user.dir") + "/src/test/resources/modis/";
    testSpatialRangeQuery();
    testSpatialRangeQueryUsingIndex();
    sc.stop();
    System.out.println("All GeoSpark Earthdata DEMOs passed!");
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 29, Source file: EarthdataMapperRunnableExample.java

Example 5: getBaseConfiguration

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
public Map<String, Object> getBaseConfiguration(final String graphName, final Class<?> test, final String testMethodName, final LoadGraphWith.GraphData loadGraphWith) {
    Spark.close();
    final Map<String, Object> config = super.getBaseConfiguration(graphName, test, testMethodName, loadGraphWith);
    // ensure the context doesn't stay open for the GryoSerializer tests to follow
    // this is primarily to ensure that the KryoShimService loaded specifically in these tests don't leak to the other tests
    config.put(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false);
    config.put("spark.serializer", KryoSerializer.class.getCanonicalName());
    config.put("spark.kryo.registrator", GryoRegistrator.class.getCanonicalName());
    System.setProperty(KRYO_SHIM_SERVICE, UnshadedKryoShimService.class.getCanonicalName());
    KryoShimServiceLoader.load(true);
    System.clearProperty(KRYO_SHIM_SERVICE);
    return config;
}
 
Developer ID: PKUSilvester, Project: LiteGraph, Lines of code: 14, Source file: SparkHadoopGraphGryoRegistratorProvider.java

Example 6: createSparkContext

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
private static JavaSparkContext createSparkContext(String master, String appName) {
  SparkConf conf = new SparkConf();
  conf.setMaster(master);
  conf.setAppName(appName);
  conf.set("spark.serializer", KryoSerializer.class.getCanonicalName());
  return new JavaSparkContext(conf);
}
 
Developer ID: shakamunyi, Project: spark-dataflow, Lines of code: 8, Source file: SparkContextFactory.java

Example 7: test

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
@Test
public void test() throws AccumuloSecurityException, IOException, AccumuloException, TableExistsException, TableNotFoundException {

  /* Connector conn = instance.getConnector("root", new PasswordToken());
  Scanner scan = conn.createScanner("moreover", Authorizations.EMPTY);
  for (Map.Entry<Key, Value> entry : scan) {
    System.out.println(entry);
  } */

  SparkConf conf = new SparkConf();

  conf.set("spark.serializer", KryoSerializer.class.getName());
  conf.setAppName("test");
  conf.setMaster("local[2]");

  SparkContext sc = new SparkContext(conf);

  CognitionConfiguration pip = new CognitionConfiguration(new AccumuloConfiguration(instance, user, password, true));
  LensAPI lens = new LensAPI(sc, pip);
  Criteria criteria = new Criteria();
  criteria.addKeyword("test");
  criteria.setDates(Instant.parse("2015-10-20T09:19:12Z"), Instant.parse("2015-10-20T09:19:13Z"));
  SchemaAdapter s = new SchemaAdapter();
  s.loadJson("moreover-schema.json");
  criteria.setSchema(s);
  criteria.setAccumuloTable("moreover");
  String json = lens.query(criteria);
  assertEquals("[moreover json]", json);
}
 
Developer ID: boozallen, Project: cognition, Lines of code: 30, Source file: LensTest.java

Example 8: getBaseConfiguration

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
@Override
public Map<String, Object> getBaseConfiguration(final String graphName, final Class<?> test, final String testMethodName, final LoadGraphWith.GraphData loadGraphWith) {
    if (this.getClass().equals(SparkHadoopGraphProvider.class) && !SparkHadoopGraphProvider.class.getCanonicalName().equals(System.getProperty(PREVIOUS_SPARK_PROVIDER, null))) {
        Spark.close();
        HadoopPools.close();
        KryoShimServiceLoader.close();
        System.setProperty(PREVIOUS_SPARK_PROVIDER, SparkHadoopGraphProvider.class.getCanonicalName());
    }

    final Map<String, Object> config = super.getBaseConfiguration(graphName, test, testMethodName, loadGraphWith);
    config.put(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);  // this makes the test suite go really fast

    // toy graph inputRDD does not have corresponding outputRDD so where jobs chain, it fails (failing makes sense)
    if (null != loadGraphWith &&
            !test.equals(ProgramTest.Traversals.class) &&
            !test.equals(PageRankTest.Traversals.class) &&
            !test.equals(PeerPressureTest.Traversals.class) &&
            !test.equals(FileSystemStorageCheck.class) &&
            !testMethodName.equals("shouldSupportJobChaining") &&  // GraphComputerTest.shouldSupportJobChaining
            RANDOM.nextBoolean()) {
        config.put(Constants.GREMLIN_HADOOP_GRAPH_READER, ToyGraphInputRDD.class.getCanonicalName());
    }

    // tests persisted RDDs
    if (test.equals(SparkContextStorageCheck.class)) {
        config.put(Constants.GREMLIN_HADOOP_GRAPH_READER, ToyGraphInputRDD.class.getCanonicalName());
        config.put(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
    }

    config.put(Constants.GREMLIN_HADOOP_DEFAULT_GRAPH_COMPUTER, SparkGraphComputer.class.getCanonicalName());
    config.put(SparkLauncher.SPARK_MASTER, "local[4]");
    config.put(Constants.SPARK_SERIALIZER, KryoSerializer.class.getCanonicalName());
    config.put(Constants.SPARK_KRYO_REGISTRATOR, GryoRegistrator.class.getCanonicalName());
    config.put(Constants.SPARK_KRYO_REGISTRATION_REQUIRED, true);
    return config;
}
 
Developer ID: apache, Project: tinkerpop, Lines of code: 37, Source file: SparkHadoopGraphProvider.java

Example 9: initialize

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
protected static void initialize(final String testSuiteName) {
    conf = new SparkConf().setAppName(testSuiteName).setMaster("local[2]");
    conf.set("spark.serializer", KryoSerializer.class.getName());
    conf.set("spark.kryo.registrator", GeoSparkKryoRegistrator.class.getName());

    sc = new JavaSparkContext(conf);
    Logger.getLogger("org").setLevel(Level.WARN);
    Logger.getLogger("akka").setLevel(Level.WARN);
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 10, Source file: GeoSparkTestBase.java

Example 10: initialize

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
protected static void initialize(final String testSuiteName) throws Exception {
    conf = new SparkConf().setAppName(testSuiteName).setMaster("local[*]");
    conf.set("spark.serializer", KryoSerializer.class.getName());
    conf.set("spark.kryo.registrator", GeoSparkVizKryoRegistrator.class.getName());

    sparkContext = new JavaSparkContext(conf);
    Logger.getLogger("org").setLevel(Level.WARN);
    Logger.getLogger("akka").setLevel(Level.WARN);

    prop = new Properties();

    inputProp = GeoSparkVizTestBase.class.getClassLoader().getResourceAsStream("babylon.point.properties");
    prop.load(inputProp);
    PointInputLocation = "file://"+GeoSparkVizTestBase.class.getClassLoader().getResource(prop.getProperty("inputLocation")).getPath();
    PointOffset = Integer.parseInt(prop.getProperty("offset"));
    PointSplitter = FileDataSplitter.getFileDataSplitter(prop.getProperty("splitter"));
    PointNumPartitions = Integer.parseInt(prop.getProperty("numPartitions"));

    inputProp = GeoSparkVizTestBase.class.getClassLoader().getResourceAsStream("babylon.rectangle.properties");
    prop.load(inputProp);
    RectangleInputLocation = "file://"+GeoSparkVizTestBase.class.getClassLoader().getResource(prop.getProperty("inputLocation")).getPath();
    RectangleOffset = Integer.parseInt(prop.getProperty("offset"));
    RectangleSplitter = FileDataSplitter.getFileDataSplitter(prop.getProperty("splitter"));
    RectangleNumPartitions = Integer.parseInt(prop.getProperty("numPartitions"));

    inputProp = GeoSparkVizTestBase.class.getClassLoader().getResourceAsStream("babylon.polygon.properties");
    prop.load(inputProp);
    PolygonInputLocation = "file://"+GeoSparkVizTestBase.class.getClassLoader().getResource(prop.getProperty("inputLocation")).getPath();
    PolygonOffset = Integer.parseInt(prop.getProperty("offset"));
    PolygonSplitter = FileDataSplitter.getFileDataSplitter(prop.getProperty("splitter"));
    PolygonNumPartitions = Integer.parseInt(prop.getProperty("numPartitions"));

    inputProp = GeoSparkVizTestBase.class.getClassLoader().getResourceAsStream("babylon.linestring.properties");
    prop.load(inputProp);
    LineStringInputLocation = "file://"+GeoSparkVizTestBase.class.getClassLoader().getResource(prop.getProperty("inputLocation")).getPath();
    LineStringOffset = Integer.parseInt(prop.getProperty("offset"));
    LineStringSplitter = FileDataSplitter.getFileDataSplitter(prop.getProperty("splitter"));
    LineStringNumPartitions = Integer.parseInt(prop.getProperty("numPartitions"));

    // The US mainland boundary.
    USMainLandBoundary = new Envelope(-126.790180, -64.630926, 24.863836, 50.000);
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 43, Source file: GeoSparkVizTestBase.java

Example 11: main

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
/**
 * The main method.
 *
 * @param args the arguments
 */
public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("GeoSparkRunnableExample").setMaster("local[2]");
    conf.set("spark.serializer", KryoSerializer.class.getName());
    conf.set("spark.kryo.registrator", GeoSparkKryoRegistrator.class.getName());

    sc = new JavaSparkContext(conf);
    Logger.getLogger("org").setLevel(Level.WARN);
    Logger.getLogger("akka").setLevel(Level.WARN);

    String resourceFolder = System.getProperty("user.dir") + "/src/test/resources/";

    PointRDDInputLocation = resourceFolder + "arealm-small.csv";
    PointRDDSplitter = FileDataSplitter.CSV;
    PointRDDIndexType = IndexType.RTREE;
    PointRDDNumPartitions = 5;
    PointRDDOffset = 0;

    PolygonRDDInputLocation = resourceFolder + "primaryroads-polygon.csv";
    PolygonRDDSplitter = FileDataSplitter.CSV;
    PolygonRDDNumPartitions = 5;
    PolygonRDDStartOffset = 0;
    PolygonRDDEndOffset = 8;

    geometryFactory = new GeometryFactory();
    kNNQueryPoint = geometryFactory.createPoint(new Coordinate(-84.01, 34.01));
    rangeQueryWindow = new Envelope(-90.01, -80.01, 30.01, 40.01);
    joinQueryPartitioningType = GridType.QUADTREE;
    eachQueryLoopTimes = 5;

    ShapeFileInputLocation = resourceFolder + "shapefiles/polygon";

    try {
        testSpatialRangeQuery();
        testSpatialRangeQueryUsingIndex();
        testSpatialKnnQuery();
        testSpatialKnnQueryUsingIndex();
        testSpatialJoinQuery();
        testSpatialJoinQueryUsingIndex();
        testDistanceJoinQuery();
        testDistanceJoinQueryUsingIndex();
        testCRSTransformationSpatialRangeQuery();
        testCRSTransformationSpatialRangeQueryUsingIndex();
        testLoadShapefileIntoPolygonRDD();
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("GeoSpark DEMOs failed!");
        return;
    }
    sc.stop();
    System.out.println("All GeoSpark DEMOs passed!");
}
 
Developer ID: DataSystemsLab, Project: GeoSpark, Lines of code: 58, Source file: Example.java

Example 12: SpliceKryoSerializerInstance

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
public SpliceKryoSerializerInstance(KryoSerializer ks) {
    super(ks,false);
}
 
Developer ID: splicemachine, Project: spliceengine, Lines of code: 4, Source file: SpliceKryoSerializerInstance.java

Example 13: roundTripInKryo

import org.apache.spark.serializer.KryoSerializer; // import the required package/class
/**
 * Takes an input object and returns the value of the object after it has been serialized and then deserialized in Kryo.
 * Requires the class of the input object as a parameter because it's not generally possible to get the class of a
 * generified method parameter with reflection.
 *
 * @param input instance of inputClazz.  Never {@code null}
 * @param inputClazz class to cast input
 * @param conf Spark configuration to test
 * @param <T> class to attempt.  Same or subclass of inputClazz
 * @return serialized and deserialized instance of input.  Throws exception if serialization round trip fails.
 */
public static <T> T roundTripInKryo(final T input, final Class<?> inputClazz, final SparkConf conf) {
    Utils.nonNull(input);
    final KryoSerializer kryoSerializer = new KryoSerializer(conf);
    final SerializerInstance sparkSerializer = kryoSerializer.newInstance();
    final ClassTag<T> tag = ClassTag$.MODULE$.apply(inputClazz);
    return sparkSerializer.deserialize(sparkSerializer.serialize(input, tag), tag);
}
 
Developer ID: broadinstitute, Project: gatk, Lines of code: 19, Source file: SparkTestUtils.java
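As a usage note for Example 13, below is a minimal sketch of how such a round-trip helper might be invoked from a test. MyRecord is a hypothetical Kryo-serializable class and the assertion comes from JUnit; neither is taken from the gatk sources:

// Hypothetical test snippet; MyRecord is an assumed serializable class.
SparkConf conf = new SparkConf()
    .set("spark.serializer", KryoSerializer.class.getName())
    .set("spark.kryo.registrationRequired", "false");   // keep the sketch independent of any registrator

MyRecord original = new MyRecord("chr1", 12345);
MyRecord copy = SparkTestUtils.roundTripInKryo(original, MyRecord.class, conf);
Assert.assertEquals(original, copy);                     // the round trip should preserve equality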


Note: The org.apache.spark.serializer.KryoSerializer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.