

Java SerializableWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.spark.SerializableWritable. If you are wondering what SerializableWritable is for, how to use it, or what working examples look like, the curated class code examples below may help.


The SerializableWritable class belongs to the org.apache.spark package. Nine code examples of the class are shown below, sorted by popularity by default.
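Before diving into the examples, here is a minimal sketch (written for this article, not taken from any of the projects below) of the basic pattern: Hadoop's Configuration implements Writable but not java.io.Serializable, so it cannot be captured directly by a Spark closure or broadcast. SerializableWritable wraps the Writable to make it Java-serializable, and value() unwraps it again on the executor.

import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SerializableWritable;

// Wrap a Hadoop Configuration so Spark can serialize it along with a closure.
Configuration hbaseConf = new Configuration();
SerializableWritable<Configuration> swConf = new SerializableWritable<>(hbaseConf);

// Later, inside a function running on an executor, unwrap the original Writable.
Configuration unwrapped = swConf.value();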

Example 1: runCheck

import org.apache.spark.SerializableWritable; // import the required package/class
/**
 * After adding data to the table, start a MapReduce job to check the bulk load.
 */
public void runCheck() throws Exception {
  LOG.info("Running check");
  String jobName = IntegrationTestSparkBulkLoad.class.getSimpleName() + "_check" +
          EnvironmentEdgeManager.currentTime();

  SparkConf sparkConf = new SparkConf().setAppName(jobName).setMaster("local");
  Configuration hbaseConf = new Configuration(getConf());
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);

  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, DEFAULT_NUM_REPLICA_COUNT);
  if (replicaCount != DEFAULT_NUM_REPLICA_COUNT) {
    scan.setConsistency(Consistency.TIMELINE);
  }

  // 1. Using TableInputFormat to get data from HBase table
  // 2. Mimic LinkedListCheckingMapper in mapreduce.IntegrationTestBulkLoad
  // 3. Sort LinkKey by its order ID
  // 4. Group LinkKey if they have same chainId, and repartition RDD by NaturalKeyPartitioner
  // 5. Check LinkList in each Partition using LinkedListCheckingFlatMapFunc
  hbaseContext.hbaseRDD(getTablename(), scan).flatMapToPair(new LinkedListCheckingFlatMapFunc())
      .sortByKey()
      .combineByKey(new createCombinerFunc(), new mergeValueFunc(), new mergeCombinersFunc(),
          new NaturalKeyPartitioner(new SerializableWritable<>(hbaseConf)))
      .foreach(new LinkedListCheckingForeachFunc(new SerializableWritable<>(hbaseConf)));
  jsc.close();
}
 
Developer: apache, Project: hbase, Lines of code: 37, Source: IntegrationTestSparkBulkLoad.java

Example 2: writeExternal

import org.apache.spark.SerializableWritable; // import the required package/class
@Override
public void writeExternal(ObjectOutput out) throws IOException{
    Broadcast<SerializableWritable<Credentials>> credentials = SpliceSpark.getCredentials();
    if (credentials != null) {
        out.writeBoolean(true);
        out.writeObject(credentials);
    } else {
        out.writeBoolean(false);
    }
    out.writeLong(badRecordsSeen);
    out.writeLong(badRecordThreshold);
    out.writeBoolean(permissive);
    out.writeBoolean(op!=null);
    if(op!=null){
        out.writeObject(broadcastedActivation);
        out.writeInt(op.resultSetNumber());
    }
    out.writeObject(rowsRead);
    out.writeObject(rowsFiltered);
    out.writeObject(rowsWritten);
    out.writeObject(retryAttempts);
    out.writeObject(regionTooBusyExceptions);
    out.writeObject(rowsJoinedLeft);
    out.writeObject(rowsJoinedRight);
    out.writeObject(rowsProduced);
    out.writeObject(badRecordsAccumulator);
    out.writeObject(thrownErrorsRows);
    out.writeObject(retriedRows);
    out.writeObject(partialRows);
    out.writeObject(partialThrownErrorRows);
    out.writeObject(partialRetriedRows);
    out.writeObject(partialIgnoredRows);
    out.writeObject(partialWrite);
    out.writeObject(ignoredRows);
    out.writeObject(catchThrownRows);
    out.writeObject(catchRetriedRows);
    out.writeObject(pipelineRowsWritten);
}
 
Developer: splicemachine, Project: spliceengine, Lines of code: 39, Source: SparkOperationContext.java

Example 3: runLinkedListSparkJob

import org.apache.spark.SerializableWritable; // import the required package/class
/**
 * Runs a Spark job to create linked lists for testing.
 * @param iteration the iteration number of this job
 * @throws Exception if an HBase operation or getting the test directory fails
 */
public void runLinkedListSparkJob(int iteration) throws Exception {
  String jobName =  IntegrationTestSparkBulkLoad.class.getSimpleName() + " _load " +
      EnvironmentEdgeManager.currentTime();

  LOG.info("Running iteration " + iteration + "in Spark Job");

  Path output = null;
  if (conf.get(BULKLOAD_OUTPUT_PATH) == null) {
    output = util.getDataTestDirOnTestFS(getTablename() + "-" + iteration);
  } else {
    output = new Path(conf.get(BULKLOAD_OUTPUT_PATH));
  }

  SparkConf sparkConf = new SparkConf().setAppName(jobName).setMaster("local");
  Configuration hbaseConf = new Configuration(getConf());
  hbaseConf.setInt(CURRENT_ROUND_NUM, iteration);
  int partitionNum = hbaseConf.getInt(BULKLOAD_PARTITIONS_NUM, DEFAULT_BULKLOAD_PARTITIONS_NUM);


  JavaSparkContext jsc = new JavaSparkContext(sparkConf);
  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, hbaseConf);


  LOG.info("Partition RDD into " + partitionNum + " parts");
  List<String> temp = new ArrayList<>();
  JavaRDD<List<byte[]>> rdd = jsc.parallelize(temp, partitionNum).
      mapPartitionsWithIndex(new LinkedListCreationMapper(new SerializableWritable<>(hbaseConf)),
              false);

  hbaseContext.bulkLoad(rdd, getTablename(), new ListToKeyValueFunc(), output.toUri().getPath(),
      new HashMap<>(), false, HConstants.DEFAULT_MAX_FILE_SIZE);

  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin();
      Table table = conn.getTable(getTablename());
      RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {
    // Create a new loader.
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);

    // Load the HFiles into table.
    loader.doBulkLoad(output, admin, table, regionLocator);
  }


  // Delete the files.
  util.getTestFileSystem().delete(output, true);
  jsc.close();
}
 
Developer: apache, Project: hbase, Lines of code: 54, Source: IntegrationTestSparkBulkLoad.java

Example 4: LinkedListCreationMapper

import org.apache.spark.SerializableWritable; // import the required package/class
public LinkedListCreationMapper(SerializableWritable conf) {
  this.swConfig = conf;
}
 
Developer: apache, Project: hbase, Lines of code: 4, Source: IntegrationTestSparkBulkLoad.java

Example 5: NaturalKeyPartitioner

import org.apache.spark.SerializableWritable; // import the required package/class
public NaturalKeyPartitioner(SerializableWritable swConf) {
  Configuration hbaseConf = (Configuration) swConf.value();
  numPartions = hbaseConf.getInt(BULKLOAD_PARTITIONS_NUM, DEFAULT_BULKLOAD_PARTITIONS_NUM);

}
 
Developer: apache, Project: hbase, Lines of code: 6, Source: IntegrationTestSparkBulkLoad.java

Example 6: LinkedListCheckingForeachFunc

import org.apache.spark.SerializableWritable; // import the required package/class
public LinkedListCheckingForeachFunc(SerializableWritable conf) {
  swConf = conf;
}
 
Developer: apache, Project: hbase, Lines of code: 4, Source: IntegrationTestSparkBulkLoad.java

Example 7: readExternal

import org.apache.spark.SerializableWritable; // import the required package/class
@Override
public void readExternal(ObjectInput in)
        throws IOException, ClassNotFoundException{
    Credentials credentials = null;
    if (in.readBoolean()) {
        // we've got credentials to apply
        Broadcast<SerializableWritable<Credentials>> bcast = (Broadcast<SerializableWritable<Credentials>>) in.readObject();
        credentials = bcast.getValue().value();
    }
    badRecordsSeen = in.readLong();
    badRecordThreshold = in.readLong();
    permissive=in.readBoolean();
    SpliceSpark.setupSpliceStaticComponents(credentials);
    boolean isOp=in.readBoolean();
    if(isOp){
        broadcastedActivation = (BroadcastedActivation)in.readObject();
        op=(Op)broadcastedActivation.getActivationHolder().getOperationsMap().get(in.readInt());
        activation=broadcastedActivation.getActivationHolder().getActivation();
    }
    rowsRead=(LongAccumulator)in.readObject();
    rowsFiltered=(LongAccumulator)in.readObject();
    rowsWritten=(LongAccumulator)in.readObject();
    retryAttempts =(LongAccumulator)in.readObject();
    regionTooBusyExceptions =(LongAccumulator)in.readObject();
    rowsJoinedLeft=(LongAccumulator)in.readObject();
    rowsJoinedRight=(LongAccumulator)in.readObject();
    rowsProduced=(LongAccumulator)in.readObject();
    badRecordsAccumulator = (Accumulable<BadRecordsRecorder,String>) in.readObject();

    thrownErrorsRows=(LongAccumulator)in.readObject();
    retriedRows=(LongAccumulator)in.readObject();
    partialRows=(LongAccumulator)in.readObject();
    partialThrownErrorRows=(LongAccumulator)in.readObject();
    partialRetriedRows=(LongAccumulator)in.readObject();
    partialIgnoredRows=(LongAccumulator)in.readObject();
    partialWrite=(LongAccumulator)in.readObject();
    ignoredRows=(LongAccumulator)in.readObject();
    catchThrownRows=(LongAccumulator)in.readObject();
    catchRetriedRows=(LongAccumulator)in.readObject();
    pipelineRowsWritten=(LongAccumulator)in.readObject();
}
 
Developer: splicemachine, Project: spliceengine, Lines of code: 42, Source: SparkOperationContext.java

Example 8: setCredentials

import org.apache.spark.SerializableWritable; // import the required package/class
public static synchronized  void setCredentials(Broadcast<SerializableWritable<Credentials>> creds) {
    credentials = creds;
}
 
Developer: splicemachine, Project: spliceengine, Lines of code: 4, Source: SpliceSpark.java

Example 9: getCredentials

import org.apache.spark.SerializableWritable; // import the required package/class
public static synchronized  Broadcast<SerializableWritable<Credentials>> getCredentials() {
    return credentials;
}
 
Developer: splicemachine, Project: spliceengine, Lines of code: 4, Source: SpliceSpark.java
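
Examples 8 and 9 only store and return the broadcast variable. As context, here is a hedged sketch of how a Broadcast<SerializableWritable<Credentials>> might be created on the driver and handed to SpliceSpark; the SparkConf/JavaSparkContext setup and the method wrapper are assumptions added for illustration and are not part of the SpliceSpark source shown above.

import java.io.IOException;

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.spark.SerializableWritable;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public static void broadcastCredentials() throws IOException {
  // Illustration only: a real deployment would reuse an existing Spark context.
  SparkConf sparkConf = new SparkConf().setAppName("credentials-broadcast").setMaster("local");
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  // Collect the current user's Hadoop credentials on the driver.
  Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();

  // Credentials implements Writable, so wrap it in SerializableWritable before broadcasting.
  Broadcast<SerializableWritable<Credentials>> bcast =
      jsc.broadcast(new SerializableWritable<>(creds));

  // Register the broadcast so writeExternal/readExternal (Examples 2 and 7) can ship it.
  SpliceSpark.setCredentials(bcast);
}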


Note: The org.apache.spark.SerializableWritable class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.