Java HFileContext Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileContext. If you have been wondering what HFileContext is for, how to use it, or where to find examples, the curated class code examples below may help.


The HFileContext class belongs to the org.apache.hadoop.hbase.io.hfile package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
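
Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: build an HFileContext through HFileContextBuilder and hand it to an HFile writer factory. This sketch is not taken from any of the projects below; the output path, block size, compression setting, and row contents are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileContextSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Describe how the HFile blocks should be written; the block size and
    // compression algorithm here are illustrative values, not defaults.
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(64 * 1024)
        .withCompression(Compression.Algorithm.NONE)
        .build();

    // The context is passed to the writer factory, exactly as in the
    // examples that follow.
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, new Path("/tmp/hfilecontext-sketch.hfile"))
        .withFileContext(context)
        .create();
    try {
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), System.currentTimeMillis(), Bytes.toBytes("value1")));
    } finally {
      writer.close();
    }
  }
}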

Example 1: createHFile

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestAccessController.java

Example 2: setUp

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);
  
  if ("aes".equals(cipher)) {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }
  
  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HFilePerformanceEvaluation.java

Example 3: createHFile

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestRegionObserverInterface.java

Example 4: createHFile

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
/**
 * Create an HFile with the given number of rows, each with the specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows rows that all carry the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestHRegionServerBulkLoad.java

Example 5: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes,
          family,
          randomBytes,
          0L,
          KeyValue.Type.Put.getCode(),
          randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestBulkLoad.java

Example 6: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private String createHFileForFamilies(Path testPath, byte[] family,
    byte[] valueBytes) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
        KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHRegionReplayEvents.java

Example 7: addStoreFile

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private void addStoreFile() throws IOException {
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId();
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
      fs)
          .withOutputDir(storedir)
          .withFileContext(fileContext)
          .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestStore.java

Example 8: testBasicHalfMapFile

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
/**
 * Write a file and then assert that we can read from top and bottom halves
 * using two HalfMapFiles.
 * @throws Exception
 */
public void testBasicHalfMapFile() throws Exception {
  final HRegionInfo hri =
      new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);

  Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
    BloomType.NONE);
  checkHalfHFile(regionFs, sf);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestStoreFile.java

Example 9: testBloomFilter

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
                      .withChecksumType(CKTYPE)
                      .withBytesPerCheckSum(CKBYTES).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(f)
          .withBloomType(BloomType.ROW)
          .withMaxKeyCount(2000)
          .withFileContext(meta)
          .build();
  bloomWriteRead(writer, fs);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestStoreFile.java

Example 10: testReseek

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
/**
 * Test for HBASE-8012
 */
public void testReseek() throws Exception {
  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(f)
          .withFileContext(meta)
          .build();

  writeStoreFile(writer);
  writer.close();

  StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);

  // Now do reseek with empty KV to position to the beginning of the file

  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = reader.getStoreFileScanner(false, false);
  s.reseek(k);

  assertNotNull("Initial reseek should position at the beginning of the file", s.peek());
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestStoreFile.java

Example 11: testSeekWithRandomData

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
@Test
public void testSeekWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestPrefixTreeEncoding.java

Example 12: testSeekWithFixedData

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
@Test
public void testSeekWithFixedData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestPrefixTreeEncoding.java

Example 13: seekToTheKey

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, KeyValue toSeek)
    throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<DataBlockEncoder.EncodedSeeker>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null || encoding == DataBlockEncoding.PREFIX_TREE) {
      continue;
    }

    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
        .withIncludesMvcc(false).withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE).build();
    HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
        encodingContext);
    DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
        encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(encodedBuffer);
    encodedSeekers.add(seeker);
  }
  // test it: seek every encoder to the key and verify they agree on the result
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestSeekToBlockWithEncoders.java

Example 14: getEncodingContext

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestDataBlockEncoders.java

Example 15: testAlgorithm

import org.apache.hadoop.hbase.io.hfile.HFileContext; // import the required package/class
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();

  // This is because, in the case of the prefix tree codec, the decoded
  // stream will not have the mvcc in it.
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestDataBlockEncoders.java


Note: The org.apache.hadoop.hbase.io.hfile.HFileContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.