

Java HFile.Writer Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.Writer. If you are wondering what HFile.Writer does, how to use it, or where to find working examples, the curated snippets below should help. You can also explore the broader usage of org.apache.hadoop.hbase.io.hfile.HFile, the class that hosts this method.


The following shows 15 code examples of the HFile.Writer method, ordered by popularity. As orientation, a minimal sketch of the writer lifecycle they all share precedes the individual examples.
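
The sketch below shows the common lifecycle: build an HFileContext, obtain a writer from the factory, append cells in ascending key order, and close the writer. It is an illustrative assumption, not taken from any example below; the class name, output path, and column names are invented for demonstration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // illustrative path
    HFileContext context = new HFileContextBuilder().build();
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    try {
      long now = System.currentTimeMillis();
      // Cells must be appended in ascending key order.
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), now, Bytes.toBytes("value1")));
    } finally {
      writer.close();
    }
  }
}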

Example 1: createDeleteBloomAtWrite

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf the current configuration
 * @param cacheConf the cache configuration for the store file
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: BloomFilterFactory.java

Example 2: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // request numRows - 2 split keys; with inclusive=true the two boundary
    // keys are added back, so the loop writes numRows rows in total
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestAccessController.java

Example 3: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestRegionObserverInterface.java

Example 4: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestHRegionServerBulkLoad.java

Example 5: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes,
          family,
          randomBytes,
          0L,
          KeyValue.Type.Put.getCode(),
          randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestBulkLoad.java

Example 6: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private String createHFileForFamilies(Path testPath, byte[] family,
    byte[] valueBytes) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
        KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHRegionReplayEvents.java

Example 7: testIndexHalfStoreFileReaderWithSeekTo

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
@Test
public void testIndexHalfStoreFileReaderWithSeekTo() throws Exception {
  HBaseTestingUtility test_util = new HBaseTestingUtility();
  String root_dir = test_util.getDataTestDir("TestIndexHalfStoreFile").toString();
  Path p = new Path(root_dir, "test");
  Configuration conf = test_util.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();

  HFile.Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta)
          .withComparator(KeyValue.COMPARATOR).create();
  String usertableName = "testIndexHalfStore";
  List<KeyValue> items = genSomeKeys(usertableName);
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();
  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte[] midkey = "005".getBytes();
  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf, conf);
  r.close();
}
 
Developer: tenggyut, Project: HIndex, Lines: 27, Source: TestIndexHalfStoreFileReader.java

Example 8: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 * TODO put me in an HFileTestUtil or something?
 */
static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 29, Source: TestLoadIncrementalHFiles.java

Example 9: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 24, Source: TestHRegionServerBulkLoad.java

Example 10: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key.
 */
public static void createHFile(
    Configuration configuration,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException
{
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
        Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 29, Source: HFileTestUtil.java

Example 11: doSmokeTest

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  // Write any-old Cell...
  final byte [] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    scanner.seekTo(); // position to the start of file
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getKeyValue();
    if (CellComparator.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: CompressionTest.java
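
Example 11's doSmokeTest writes one cell with the named codec and reads it back, throwing if the round trip fails. The fragment below is a hypothetical invocation: the path is an assumption, and "gz" is one of the standard HBase codec names.

// Hypothetical driver fragment for doSmokeTest above (assumed path):
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.getLocal(conf);
doSmokeTest(fs, new Path("/tmp/codec-smoke.hfile"), "gz"); // throws on read-back mismatch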

Example 12: createGeneralBloomAtWrite

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf the current configuration
 * @param cacheConf the cache configuration for the store file
 * @param bloomType the Bloom filter type for the column family (e.g. ROW or ROWCOL)
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: BloomFilterFactory.java
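
As a sanity check on the ROWCOL adjustment in Example 12: since a row/column lookup is the OR of two lookups, a per-lookup rate of p = 1 - sqrt(1 - err) recombines to the target rate err. The standalone sketch below (the class name is hypothetical) verifies the round trip for err = 1%:

public class RowColErrorRateCheck {
  public static void main(String[] args) {
    float err = 0.01f;                          // target combined false-positive rate
    float p = (float) (1 - Math.sqrt(1 - err)); // per-lookup rate, ~0.005013
    float combined = 1 - (1 - p) * (1 - p);     // OR of two lookups: back to ~0.01
    System.out.printf("p = %.6f, combined = %.6f%n", p, combined);
  }
}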

Example 13: writeToHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContext();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));

  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }

  writer.append(kv);

  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
  }
  writer.close();
  return hfilePath;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestScannerWithBulkload.java

Example 14: testHalfScanAndReseek

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API
 * demands that seekTo and reseekTo() only return < 0 if the key lies
 * before the start of the file (with no position on the scanner), returning
 * 0 on a perfect match (rare) and 1 on an imperfect match.
 *
 * The latter case being the most common, we should generally be returning 1,
 * and if we do, there may or may not be a 'next' in the scanner/file.
 *
 * A bug in the half file scanner returned -1 at the end of the bottom
 * half, which caused the calling infrastructure to see nulls, leading to NPEs
 * and other problems. This test reproduces that failure, and also tests
 * both the bottom and top of the file while we are at it.
 *
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withFileContext(meta)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte [] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();

  //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestHalfStoreFileReader.java
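
The seekTo() contract described in the Javadoc above can be made concrete in a small helper. The sketch below is hypothetical (the class and method names are invented) and assumes the pre-1.0 HFileScanner API, where seekTo takes a byte[] key and returns an int:

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class SeekContractSketch {
  /**
   * Positions the scanner at or before the given key. Returns false when the
   * key precedes the file's first key, i.e. seekTo() returned < 0 and the
   * scanner is left unpositioned.
   */
  static boolean seekAtOrBefore(HFileScanner scanner, byte[] key) throws IOException {
    int result = scanner.seekTo(key);
    if (result < 0) {
      return false; // key lies before the start of the file
    }
    // result == 0: exact match (rare); result == 1 (the common case): the
    // scanner sits on the greatest key less than 'key', and there may or
    // may not be a next() after it.
    return true;
  }
}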

Example 15: testHalfScanAndReseek

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API
 * demands that seekTo and reseekTo() only return < 0 if the key lies
 * before the start of the file (with no position on the scanner), returning
 * 0 on a perfect match (rare) and 1 on an imperfect match.
 *
 * The latter case being the most common, we should generally be returning 1,
 * and if we do, there may or may not be a 'next' in the scanner/file.
 *
 * A bug in the half file scanner returned -1 at the end of the bottom
 * half, which caused the calling infrastructure to see nulls, leading to NPEs
 * and other problems. This test reproduces that failure, and also tests
 * both the bottom and top of the file while we are at it.
 *
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  HBaseTestingUtility test_util = new HBaseTestingUtility();
  String root_dir = test_util.getDataTestDir("TestHalfStoreFile").toString();
  Path p = new Path(root_dir, "test");

  Configuration conf = test_util.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);

  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withBlockSize(1024)
      .withComparator(KeyValue.KEY_COMPARATOR)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf);
  r.loadFileInfo();
  byte [] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();

  //System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 56, Source: TestHalfStoreFileReader.java


Note: The org.apache.hadoop.hbase.io.hfile.HFile.Writer method examples in this article were curated by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community-contributed open-source projects; copyright in the source code remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.