

Java HFile Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFile. If you are wondering what the HFile class does, how to use it, or where to find working examples, the curated code samples below may help.


The HFile class belongs to the org.apache.hadoop.hbase.io.hfile package. A total of 15 HFile code examples are presented below, sorted by popularity by default.
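Before walking through the curated examples, here is a minimal, self-contained sketch of the basic write-then-read round trip for a standalone HFile. It follows the same HBase 1.x-era API that appears throughout the examples below (getWriterFactory/withPath/withFileContext and createReader); the output path, column family, and qualifier are placeholder values chosen for illustration, not taken from any of the projects referenced here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileRoundTripSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/hfile-demo/demo.hfile"); // placeholder location

    // Write a few cells into a standalone HFile (keys must be appended in sorted order).
    HFileContext context = new HFileContextBuilder().build();
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    long now = System.currentTimeMillis();
    try {
      for (int i = 0; i < 3; i++) {
        byte[] row = Bytes.toBytes(String.format("row%03d", i));
        writer.append(new KeyValue(row, Bytes.toBytes("f"), Bytes.toBytes("q"), now, row));
      }
    } finally {
      writer.close();
    }

    // Read the file back and count the cells with an HFileScanner.
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      HFileScanner scanner = reader.getScanner(false, false);
      int count = 0;
      if (scanner.seekTo()) {
        do {
          count++;
        } while (scanner.next());
      }
      System.out.println("Cells read: " + count);
    } finally {
      reader.close();
    }
  }
}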

Example 1: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();

  authorizationEnabled = isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The VisibilityController has been loaded with authorization checks disabled.");
  }

  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException("Visibility controller should not be configured as "
        + "'hbase.coprocessor.regionserver.classes'.");
  }
  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: VisibilityController.java

Example 2: CompoundBloomFilter

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @throws IOException
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
    throws IOException {
  this.reader = reader;

  totalByteSize = meta.readLong();
  hashCount = meta.readInt();
  hashType = meta.readInt();
  totalKeyCount = meta.readLong();
  totalMaxKeys = meta.readLong();
  numChunks = meta.readInt();
  comparator = FixedFileTrailer.createComparator(
      Bytes.toString(Bytes.readByteArray(meta)));

  hash = Hash.getInstance(hashType);
  if (hash == null) {
    throw new IllegalArgumentException("Invalid hash type: " + hashType);
  }

  index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
  index.readRootIndex(meta, numChunks);
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: CompoundBloomFilter.java

Example 3: createFromMeta

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
/**
 * Instantiates the correct Bloom filter class based on the version provided
 * in the meta block data.
 *
 * @param meta the byte array holding the Bloom filter's metadata, including
 *          version information
 * @param reader the {@link HFile} reader to use to lazily load Bloom filter
 *          blocks
 * @return an instance of the correct type of Bloom filter
 * @throws IllegalArgumentException
 */
public static BloomFilter
    createFromMeta(DataInput meta, HFile.Reader reader)
    throws IllegalArgumentException, IOException {
  int version = meta.readInt();
  switch (version) {
    case ByteBloomFilter.VERSION:
      // This is only possible in a version 1 HFile. We are ignoring the
      // passed comparator because raw byte comparators are always used
      // in version 1 Bloom filters.
      return new ByteBloomFilter(meta);

    case CompoundBloomFilterBase.VERSION:
      return new CompoundBloomFilter(meta, reader);

    default:
      throw new IllegalArgumentException(
        "Bad bloom filter format version " + version
      );
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: BloomFilterFactory.java
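For context, here is a hedged sketch of a typical call site for createFromMeta: it opens an HFile, pulls the serialized general Bloom filter metadata from the reader, and asks the reconstructed filter whether a row key might be present. It assumes the getGeneralBloomFilterMetadata() accessor on HFile.Reader and a ROW-type Bloom filter; the path handling and method name below are illustrative, not taken from the referenced project.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.BloomFilter;
import org.apache.hadoop.hbase.util.BloomFilterFactory;

public class BloomCheckSketch {
  /** Returns false only if the Bloom filter can rule the row key out. */
  public static boolean mightContainRow(Configuration conf, Path hfilePath, byte[] rowKey)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
    try {
      // Serialized metadata written by CompoundBloomFilterWriter (or a v1 ByteBloomFilter).
      DataInput bloomMeta = reader.getGeneralBloomFilterMetadata();
      if (bloomMeta == null) {
        return true; // no Bloom filter present, so nothing can be ruled out
      }
      BloomFilter bloom = BloomFilterFactory.createFromMeta(bloomMeta, reader);
      // Assumes a ROW Bloom filter: the tested key is the raw row key bytes.
      return bloom.contains(rowKey, 0, rowKey.length, null);
    } finally {
      reader.close();
    }
  }
}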

Example 4: createDeleteBloomAtWrite

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf
 * @param cacheConf
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: BloomFilterFactory.java

Example 5: getOldestHfileTs

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
@Override public long getOldestHfileTs(boolean majorCompactioOnly) throws IOException {
  long result = Long.MAX_VALUE;
  for (Store store : getStores()) {
    Collection<StoreFile> storeFiles = store.getStorefiles();
    if (storeFiles == null) continue;
    for (StoreFile file : storeFiles) {
      StoreFile.Reader sfReader = file.getReader();
      if (sfReader == null) continue;
      HFile.Reader reader = sfReader.getHFileReader();
      if (reader == null) continue;
      if (majorCompactioOnly) {
        byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
        if (val == null || !Bytes.toBoolean(val)) {
          continue;
        }
      }
      result = Math.min(result, reader.getFileContext().getFileCreateTime());
    }
  }
  return result == Long.MAX_VALUE ? 0 : result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: HRegion.java

Example 6: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
private void createHFile(Path path,
    byte[] family, byte[] qualifier,
    byte[] startKey, byte[] endKey, int numRows) throws IOException {

  HFile.Writer writer = null;
  long now = System.currentTimeMillis();
  try {
    HFileContext context = new HFileContextBuilder().build();
    writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withFileContext(context)
        .create();
    // subtract 2 since numRows doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
      KeyValue kv = new KeyValue(key, family, qualifier, now, key);
      writer.append(kv);
    }
  } finally {
    if (writer != null) {
      writer.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestAccessController.java
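HFiles produced by test helpers like this one are usually handed to HBase's bulk-load machinery afterwards. The following is a rough, hedged sketch of one way to complete a bulk load for files laid out as <staging dir>/<family>/<hfile>, assuming the LoadIncrementalHFiles tool available in this HBase lineage; the staging directory and table name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.util.ToolRunner;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // LoadIncrementalHFiles expects one directory per column family under the staging
    // directory, each containing the HFiles to load; both arguments are placeholders.
    int exitCode = ToolRunner.run(conf, new LoadIncrementalHFiles(conf),
        new String[] { "/tmp/bulkload-staging", "my_table" });
    System.exit(exitCode);
  }
}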

Example 7: setUp

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
@Override
void setUp() throws Exception {

  HFileContextBuilder builder = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(codec))
      .withBlockSize(RFILE_BLOCKSIZE);
  
  if (cipher == "aes") {
    byte[] cipherKey = new byte[AES.KEY_LENGTH];
    new SecureRandom().nextBytes(cipherKey);
    builder.withEncryptionContext(Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, cipher))
        .setKey(cipherKey));
  } else if (!"none".equals(cipher)) {
    throw new IOException("Cipher " + cipher + " not supported.");
  }
  
  HFileContext hFileContext = builder.build();

  writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, mf)
      .withFileContext(hFileContext)
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HFilePerformanceEvaluation.java

Example 8: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
private static void createHFile(
    Configuration conf,
    FileSystem fs, Path path,
    byte[] family, byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestRegionObserverInterface.java

Example 9: createHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family,
    byte[] qualifier, byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE)
                          .withCompression(COMPRESSION)
                          .build();
  HFile.Writer writer = HFile
      .getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // write numRows cells, all carrying the same value
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestHRegionServerBulkLoad.java

Example 10: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation));
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes,
          family,
          randomBytes,
        0L,
          KeyValue.Type.Put.getCode(),
          randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestBulkLoad.java

Example 11: createHFileForFamilies

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
private String createHFileForFamilies(Path testPath, byte[] family,
    byte[] valueBytes) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
        KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHRegionReplayEvents.java

Example 12: testCreateWriter

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestStore.java

Example 13: extractHFileKey

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestEncryptionRandomKeying.java

Example 14: setUpCluster

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  if (!util.isDistributedCluster()) {
    // Inject required configuration if we are not running in distributed mode
    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
      Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
      Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  }
  // Check if the cluster configuration can support this test
  try {
    EncryptionTest.testEncryption(conf, "AES", null);
  } catch (Exception e) {
    LOG.warn("Encryption configuration test did not pass, skipping test");
    return;
  }
  super.setUpCluster();
  initialized = true;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: IntegrationTestIngestWithEncryption.java

Example 15: setUpCluster

import org.apache.hadoop.hbase.io.hfile.HFile; // import the required package/class
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
  conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
  conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
  conf.set("hbase.superuser", User.getCurrent().getName());
  conf.setBoolean("dfs.permissions", false);
  super.setUpCluster();
  String[] users = userNames.split(",");
  if (users.length != 2) {
    System.err.println(ERROR_STR);
    throw new IOException(ERROR_STR);
  }
  System.out.println(userNames + " "+users[0]+ " "+users[1]);
  USER1 = User.createUserForTesting(conf, users[0], new String[] {});
  USER2 = User.createUserForTesting(conf, users[1], new String[] {});
  addLabelsAndAuths();
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: IntegrationTestWithCellVisibilityLoadAndVerify.java


Note: The org.apache.hadoop.hbase.io.hfile.HFile class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.