

Java Range Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.Reference.Range. If you are wondering what the Range class does, how to use it, or what it looks like in real code, the curated class examples below should help.


The Range class belongs to the org.apache.hadoop.hbase.io.Reference package. Five code examples of the Range class are shown below, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
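Before the examples, here is a minimal sketch of how Range is typically combined with Reference (mirroring Examples 1 and 4 below); the helper name makeHalfReferences is purely illustrative and not part of any of the projects referenced here.

import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.Reference.Range;

// Range.top marks the half of a split store file holding keys from the split key onward;
// Range.bottom marks the half below the split key.
static Reference[] makeHalfReferences(byte[] splitKey) {
  Reference top = new Reference(splitKey, Range.top);
  Reference bottom = new Reference(splitKey, Range.bottom);
  return new Reference[] { top, bottom };
}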

Example 1: splitStoreFile

import org.apache.hadoop.hbase.io.Reference.Range; // import the required package/class
/**
 * Split a storefile into a top and bottom half, maintaining
 * the metadata, recreating bloom filters, etc.
 */
static void splitStoreFile(
    Configuration conf, Path inFile,
    HColumnDescriptor familyDesc, byte[] splitKey,
    Path bottomOut, Path topOut) throws IOException
{
  // Open reader with no block cache, and not in-memory
  Reference topReference = new Reference(splitKey, Range.top);
  Reference bottomReference = new Reference(splitKey, Range.bottom);

  copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
  copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 17, Source file: LoadIncrementalHFiles.java
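A hedged usage sketch of the helper above: the configuration, paths, column family, and split key below are made-up placeholders for illustration, not values from the original project.

static void splitExample() throws IOException {
  // All paths, the column family, and the split key below are illustrative placeholders.
  Configuration conf = HBaseConfiguration.create();
  HColumnDescriptor familyDesc = new HColumnDescriptor("f");
  byte[] splitKey = Bytes.toBytes("row-5000");
  Path inFile = new Path("/staging/f/hfile-0001");
  Path bottomOut = new Path("/staging/f/hfile-0001.bottom");
  Path topOut = new Path("/staging/f/hfile-0001.top");
  // Produces the bottom half of the HFile at bottomOut and the top half at topOut.
  splitStoreFile(conf, inFile, familyDesc, splitKey, bottomOut, topOut);
}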

Example 2: splitStoreFile

import org.apache.hadoop.hbase.io.Reference.Range; // import the required package/class
private void splitStoreFile(final StoreFile sf, final Path splitdir)
throws IOException {
  FileSystem fs = this.parent.getFilesystem();
  byte [] family = sf.getFamily();
  String encoded = this.hri_a.getEncodedName();
  Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
  StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
  encoded = this.hri_b.getEncodedName();
  storedir = Store.getStoreHomedir(splitdir, encoded, family);
  StoreFile.split(fs, storedir, sf, this.splitrow, Range.top);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 12, Source file: SplitTransaction.java

Example 3: testReference

import org.apache.hadoop.hbase.io.Reference.Range; // import the required package/class
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference()
throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
      this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  writeStoreFile(writer);
  StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row, not in middle of row.  Midkey returned by reader
  // may be in middle of row.  Create new one with empty column and
  // timestamp.
  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  // Make a reference
  Path refPath = StoreFile.split(fs, storedir, hsf, midRow, Range.top);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 43, Source file: TestStoreFile.java
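For the Range.bottom direction, which the original test does not exercise, a minimal sketch might look like the following. It reuses the fixtures from the example above (fs, storedir, hsf, midRow, conf, cacheConf) and only illustrates the symmetric check; it is not part of the original test.

// Hypothetical bottom-half counterpart of the check above; not in the original test.
Path bottomRefPath = StoreFile.split(fs, storedir, hsf, midRow, Range.bottom);
StoreFile bottomHsf = new StoreFile(fs, bottomRefPath, conf, cacheConf,
    StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
HFileScanner bottomScanner = bottomHsf.createReader().getScanner(false, false);
// Every row returned through the bottom reference should sort before the split row.
while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
  KeyValue bottomKv = KeyValue.createKeyValueFromKey(bottomScanner.getKey());
  assertTrue(Bytes.compareTo(bottomKv.getRow(), midRow) < 0);
}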

Example 4: splitStoreFile

import org.apache.hadoop.hbase.io.Reference.Range; // import the required package/class
/**
 * Split a storefile into a top and bottom half, maintaining the metadata, recreating bloom
 * filters, etc.
 */
static void splitStoreFile(Configuration conf, Path inFile, HColumnDescriptor familyDesc,
    byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
  // Open reader with no block cache, and not in-memory
  Reference topReference = new Reference(splitKey, Range.top);
  Reference bottomReference = new Reference(splitKey, Range.bottom);

  copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
  copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
}
 
Developer ID: Huawei-Hadoop, Project: hindex, Lines of code: 14, Source file: IndexLoadIncrementalHFile.java

Example 5: testReferenceToHFileLink

import org.apache.hadoop.hbase.io.Reference.Range; // import the required package/class
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 */
public void testReferenceToHFileLink() throws IOException {
  final String columnFamily = "f";

  Path rootDir = FSUtils.getRootDir(conf);

  String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
  // store dir = <root>/<tablename>/<rgn>/<cf>
  Path storedir = new Path(new Path(rootDir,
    new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  Path storeFilePath = writer.getPath();
  writeStoreFile(writer);
  writer.close();

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  String target = "clone";
  Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
  HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  Path splitDirA = new Path(new Path(rootDir,
      new Path(target, "571A")), columnFamily);
  Path splitDirB = new Path(new Path(rootDir,
      new Path(target, "571B")), columnFamily);
  StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);
  byte[] splitRow = SPLITKEY;
  Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, Range.top); // top
  Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, Range.bottom); // bottom

  // OK test the thing
  FSUtils.logFileSystemState(fs, rootDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to an hfile link.  There is code in StoreFile that handles this case.

  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA,  conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB,  conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  
  //count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // read the rest of the rows
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 81, Source file: TestStoreFile.java


Note: The org.apache.hadoop.hbase.io.Reference.Range class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.