当前位置: 首页>>代码示例>>Java>>正文


Java Writables.copyWritable方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.util.Writables.copyWritable方法的典型用法代码示例。如果您正苦于以下问题:Java Writables.copyWritable方法的具体用法?Java Writables.copyWritable怎么用?Java Writables.copyWritable使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.util.Writables的用法示例。


在下文中一共展示了Writables.copyWritable方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: test_TIMERANGE

import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir =
    util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: "+ dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = getTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);

    // Write two KeyValues with explicit timestamps (2000 then 1000) so the
    // resulting HFile's TIMERANGE metadata must span exactly [1000, 2000].
    final byte [] b = Bytes.toBytes("b");

    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    // The writer must not mutate the KeyValue it was handed.
    assertEquals(original, kv);

    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);

    // Close the writer so the FileInfo block (including TIMERANGE) is flushed.
    writer.close(context);

    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());

    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
        new CacheConfig(conf));
    try {
      Map<byte[],byte[]> finfo = rd.loadFileInfo();
      // Use Bytes.toBytes (UTF-8) rather than String.getBytes(), which uses
      // the platform default charset; also matches the idiom used above.
      byte[] range = finfo.get(Bytes.toBytes("TIMERANGE"));
      assertNotNull(range);

      // Unmarshal the Writable-serialized TimeRangeTracker and check bounds.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() +
          "...." + timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    } finally {
      // Close the reader even when an assertion fails, so a failing test
      // does not leak an open file handle.
      rd.close();
    }
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:63,代码来源:TestHFileOutputFormat.java

示例2: test_TIMERANGE

import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir =
    util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: "+ dir);
  try {
    // build a record writer using HFileOutputFormat2
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);

    // Write two KeyValues with explicit timestamps (2000 then 1000) so the
    // resulting HFile's TIMERANGE metadata must span exactly [1000, 2000].
    final byte [] b = Bytes.toBytes("b");

    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    // The writer must not mutate the KeyValue it was handed.
    assertEquals(original, kv);

    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);

    // Close the writer so the FileInfo block (including TIMERANGE) is flushed.
    writer.close(context);

    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());

    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
        new CacheConfig(conf), conf);
    try {
      Map<byte[],byte[]> finfo = rd.loadFileInfo();
      // Use Bytes.toBytes (UTF-8) rather than String.getBytes(), which uses
      // the platform default charset; also matches the idiom used above.
      byte[] range = finfo.get(Bytes.toBytes("TIMERANGE"));
      assertNotNull(range);

      // Unmarshal the Writable-serialized TimeRangeTracker and check bounds.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() +
          "...." + timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    } finally {
      // Close the reader even when an assertion fails, so a failing test
      // does not leak an open file handle.
      rd.close();
    }
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:63,代码来源:TestHFileOutputFormat2.java

示例3: test_TIMERANGE

import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir =
    util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: "+ dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);

    // Write two KeyValues with explicit timestamps (2000 then 1000) so the
    // resulting HFile's TIMERANGE metadata must span exactly [1000, 2000].
    final byte [] b = Bytes.toBytes("b");

    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    // The writer must not mutate the KeyValue it was handed.
    assertEquals(original, kv);

    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);

    // Close the writer so the FileInfo block (including TIMERANGE) is flushed.
    writer.close(context);

    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());

    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
        new CacheConfig(conf), conf);
    try {
      Map<byte[],byte[]> finfo = rd.loadFileInfo();
      // Use Bytes.toBytes (UTF-8) rather than String.getBytes(), which uses
      // the platform default charset; also matches the idiom used above.
      byte[] range = finfo.get(Bytes.toBytes("TIMERANGE"));
      assertNotNull(range);

      // Unmarshal the Writable-serialized TimeRangeTracker and check bounds.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() +
          "...." + timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    } finally {
      // Close the reader even when an assertion fails, so a failing test
      // does not leak an open file handle.
      rd.close();
    }
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:63,代码来源:TestHFileOutputFormat.java

示例4: printMeta

import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Pretty-prints an HFile's metadata to stdout: block index heap size, the
 * reader summary, the trailer, every FileInfo entry (decoding the well-known
 * keys MAX_SEQ_ID_KEY, TIMERANGE, AVG_KEY_LEN and AVG_VALUE_LEN), the
 * mid-key, and any bloom filter metadata.
 *
 * @param reader   open reader for the HFile being dumped
 * @param fileInfo the file's loaded FileInfo key/value map
 * @throws IOException if reading the mid-key or bloom filter metadata fails
 */
private void printMeta(HFile.Reader reader, Map<byte[], byte[]> fileInfo)
    throws IOException {
  System.out.println("Block index size as per heapsize: "
      + reader.indexSize());
  System.out.println(asSeparateLines(reader.toString()));
  System.out.println("Trailer:\n    "
      + asSeparateLines(reader.getTrailer().toString()));
  System.out.println("Fileinfo:");
  for (Map.Entry<byte[], byte[]> e : fileInfo.entrySet()) {
    System.out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
    if (Bytes.compareTo(e.getKey(), Bytes.toBytes("MAX_SEQ_ID_KEY")) == 0) {
      // Max sequence id is stored as a serialized long.
      long seqid = Bytes.toLong(e.getValue());
      System.out.println(seqid);
    } else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) {
      // TIMERANGE is a Writable-serialized TimeRangeTracker.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(e.getValue(), timeRangeTracker);
      System.out.println(timeRangeTracker.getMinimumTimestamp() + "...."
          + timeRangeTracker.getMaximumTimestamp());
    } else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0
        || Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) {
      // Average key/value lengths are serialized ints.
      System.out.println(Bytes.toInt(e.getValue()));
    } else {
      // Unknown key: print the raw value in binary-escaped form.
      System.out.println(Bytes.toStringBinary(e.getValue()));
    }
  }

  System.out.println("Mid-key: " + Bytes.toStringBinary(reader.midkey()));

  // Printing bloom information. getBloomFilterMetadata() may return null
  // when the file carries no bloom filter.
  DataInput bloomMeta = reader.getBloomFilterMetadata();
  BloomFilter bloomFilter = null;
  if (bloomMeta != null) {
    // Braced even for a single statement, per standard Java style.
    bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
  }

  System.out.println("Bloom filter:");
  if (bloomFilter != null) {
    System.out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
        ByteBloomFilter.STATS_RECORD_SEP, "\n" + FOUR_SPACES));
  } else {
    System.out.println(FOUR_SPACES + "Not present");
  }
}
 
开发者ID:lifeng5042,项目名称:RStore,代码行数:43,代码来源:HFilePrettyPrinter.java

示例5: test_TIMERANGE

import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir =
    util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: "+ dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);

    // Write two KeyValues with explicit timestamps (2000 then 1000) so the
    // resulting HFile's TIMERANGE metadata must span exactly [1000, 2000].
    final byte [] b = Bytes.toBytes("b");

    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    // The writer must not mutate the KeyValue it was handed.
    assertEquals(original, kv);

    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);

    // Close the writer so the FileInfo block (including TIMERANGE) is flushed.
    writer.close(context);

    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());

    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
        new CacheConfig(conf));
    try {
      Map<byte[],byte[]> finfo = rd.loadFileInfo();
      // Use Bytes.toBytes (UTF-8) rather than String.getBytes(), which uses
      // the platform default charset; also matches the idiom used above.
      byte[] range = finfo.get(Bytes.toBytes("TIMERANGE"));
      assertNotNull(range);

      // Unmarshal the Writable-serialized TimeRangeTracker and check bounds.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() +
          "...." + timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    } finally {
      // Close the reader even when an assertion fails, so a failing test
      // does not leak an open file handle.
      rd.close();
    }
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:63,代码来源:TestHFileOutputFormat.java


注:本文中的org.apache.hadoop.hbase.util.Writables.copyWritable方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。