

Java HFile.getFormatVersion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.getFormatVersion. If you are wondering how HFile.getFormatVersion is used in practice, or are looking for concrete examples of it, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.


Ten code examples of HFile.getFormatVersion are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
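
Before working through the examples, here is a minimal, self-contained sketch of the pattern they all share: reading the configured HFile format version and comparing it against HFile.MIN_FORMAT_VERSION_WITH_TAGS before relying on tag-dependent features (as the VisibilityController and AccessController examples below do). The class name HFileVersionCheck is purely illustrative and does not come from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileVersionCheck {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Read the HFile format version configured under HFile.FORMAT_VERSION_KEY.
    int version = HFile.getFormatVersion(conf);
    System.out.println("Configured HFile format version: " + version);

    // Tag-dependent features such as cell ACLs and visibility labels require
    // at least HFile.MIN_FORMAT_VERSION_WITH_TAGS.
    if (version < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
      System.out.println("Consider setting " + HFile.FORMAT_VERSION_KEY + " to at least "
          + HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    }
  }
}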

Example 1: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();

  authorizationEnabled = isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The VisibilityController has been loaded with authorization checks disabled.");
  }

  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException("Visibility controller should not be configured as "
        + "'hbase.coprocessor.regionserver.classes'.");
  }
  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 26, Source: VisibilityController.java

Example 2: createDeleteBloomAtWrite

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf the configuration to read Bloom filter settings from
 * @param cacheConf the cache configuration (decides whether Bloom blocks are cached on write)
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  if (HFile.getFormatVersion(conf) > HFile.MIN_FORMAT_VERSION) {
    int maxFold = getMaxFold(conf);
    // In case of compound Bloom filters we ignore the maxKeys hint.
    CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(
        getBloomBlockSize(conf), err, Hash.getHashType(conf),
        maxFold,
        cacheConf.shouldCacheBloomsOnWrite(), Bytes.BYTES_RAWCOMPARATOR);
    writer.addInlineBlockWriter(bloomWriter);
    return bloomWriter;
  } else {
    LOG.info("Delete Family Bloom filter is not supported in HFile V1");
    return null;
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 37, Source: BloomFilterFactory.java

Example 3: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();
  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }

  if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException("Visibility controller should not be configured as "
        + "'hbase.coprocessor.regionserver.classes'.");
  }
  // Do not create for master CPs
  if (!(env instanceof MasterCoprocessorEnvironment)) {
    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
        .getVisibilityLabelService(this.conf);
  }
  Pair<List<String>, List<String>> superUsersAndGroups =
      VisibilityUtils.getSystemAndSuperUsers(this.conf);
  this.superUsers = superUsersAndGroups.getFirst();
  this.superGroups = superUsersAndGroups.getSecond();
}
 
Author: grokcoder, Project: pbase, Lines: 24, Source: VisibilityController.java

Example 4: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  this.conf = env.getConfiguration();
  if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
    throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
      + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY
      + " accordingly.");
  }
  ZooKeeperWatcher zk = null;
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    zk = mEnv.getMasterServices().getZooKeeper();
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    zk = regionEnv.getRegionServerServices().getZooKeeper();
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    throw new RuntimeException(
        "Visibility controller should not be configured as " +
        "'hbase.coprocessor.regionserver.classes'.");
  }

  // If zk is null or IOException while obtaining auth manager,
  // throw RuntimeException so that the coprocessor is unloaded.
  if (zk == null) {
    throw new RuntimeException("Error obtaining VisibilityLabelsManager, zk found null.");
  }
  try {
    this.visibilityManager = VisibilityLabelsManager.get(zk, this.conf);
  } catch (IOException ioe) {
    throw new RuntimeException("Error obtaining VisibilityLabelsManager", ioe);
  }
  if (env instanceof RegionCoprocessorEnvironment) {
    // ScanLabelGenerator to be instantiated only with Region Observer.
    scanLabelGenerators = VisibilityUtils.getScanLabelGenerators(this.conf);
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 39, Source: VisibilityController.java

Example 5: isCellAuthorizationSupported

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
public static boolean isCellAuthorizationSupported(Configuration conf) {
  return isAuthorizationSupported(conf) &&
      (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
}
 
Author: fengchen8086, Project: ditb, Lines: 5, Source: AccessController.java

Example 6: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  CompoundConfiguration conf = new CompoundConfiguration();
  conf.add(env.getConfiguration());

  authorizationEnabled = isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The AccessController has been loaded with authorization checks disabled.");
  }

  shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
    AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);

  cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  if (!cellFeaturesEnabled) {
    LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
        + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY
        + " accordingly.");
  }

  ZooKeeperWatcher zk = null;
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    zk = mEnv.getMasterServices().getZooKeeper();
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
    zk = rsEnv.getRegionServerServices().getZooKeeper();
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    conf.addStringMap(regionEnv.getRegion().getTableDesc().getConfiguration());
    zk = regionEnv.getRegionServerServices().getZooKeeper();
    compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
      AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
  }

  // set the user-provider.
  this.userProvider = UserProvider.instantiate(env.getConfiguration());

  // If zk is null or IOException while obtaining auth manager,
  // throw RuntimeException so that the coprocessor is unloaded.
  if (zk != null) {
    try {
      this.authManager = TableAuthManager.get(zk, env.getConfiguration());
    } catch (IOException ioe) {
      throw new RuntimeException("Error obtaining TableAuthManager", ioe);
    }
  } else {
    throw new RuntimeException("Error obtaining TableAuthManager, zk found null.");
  }

  tableAcls = new MapMaker().weakValues().makeMap();
}
 
Author: fengchen8086, Project: ditb, Lines: 55, Source: AccessController.java

Example 7: createGeneralBloomAtWrite

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf the configuration to read Bloom filter settings from
 * @param cacheConf the cache configuration (decides whether Bloom blocks are cached on write)
 * @param bloomType the Bloom filter type configured for the column family (ROW or ROWCOL)
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled
 *         or one could not be created
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
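  // For example, with a configured err of 0.01, each individual lookup uses
  // p = 1 - sqrt(0.99) ≈ 0.005.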
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  if (HFile.getFormatVersion(conf) > HFile.MIN_FORMAT_VERSION) {
    // In case of compound Bloom filters we ignore the maxKeys hint.
    CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(
        getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold,
        cacheConf.shouldCacheBloomsOnWrite(), bloomType == BloomType.ROWCOL
            ? KeyValue.KEY_COMPARATOR : Bytes.BYTES_RAWCOMPARATOR);
    writer.addInlineBlockWriter(bloomWriter);
    return bloomWriter;
  } else {
    // A single-block Bloom filter. Only used when testing HFile format
    // version 1.
    int tooBig = conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS,
        128 * 1000 * 1000);

    if (maxKeys <= 0) {
      LOG.warn("Invalid maximum number of keys specified: " + maxKeys
          + ", not using Bloom filter");
      return null;
    } else if (maxKeys < tooBig) {
      BloomFilterWriter bloom = new ByteBloomFilter((int) maxKeys, err,
          Hash.getHashType(conf), maxFold);
      bloom.allocBloom();
      return bloom;
    } else {
      LOG.debug("Skipping bloom filter because max keysize too large: "
          + maxKeys);
    }
  }
  return null;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 71, Source: BloomFilterFactory.java

Example 8: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  CompoundConfiguration conf = new CompoundConfiguration();
  conf.add(env.getConfiguration());

  shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
    AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);

  cellFeaturesEnabled = HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS;
  if (!cellFeaturesEnabled) {
    LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
        + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY
        + " accordingly.");
  }

  ZooKeeperWatcher zk = null;
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    zk = mEnv.getMasterServices().getZooKeeper();
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
    zk = rsEnv.getRegionServerServices().getZooKeeper();
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    conf.addStringMap(regionEnv.getRegion().getTableDesc().getConfiguration());
    zk = regionEnv.getRegionServerServices().getZooKeeper();
    compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
      AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
  }

  // set the user-provider.
  this.userProvider = UserProvider.instantiate(env.getConfiguration());

  // set up the list of users with superuser privilege
  User user = userProvider.getCurrent();
  superusers = Lists.asList(user.getShortName(),
    conf.getStrings(AccessControlLists.SUPERUSER_CONF_KEY, new String[0]));

  // If zk is null or IOException while obtaining auth manager,
  // throw RuntimeException so that the coprocessor is unloaded.
  if (zk != null) {
    try {
      this.authManager = TableAuthManager.get(zk, env.getConfiguration());
    } catch (IOException ioe) {
      throw new RuntimeException("Error obtaining TableAuthManager", ioe);
    }
  } else {
    throw new RuntimeException("Error obtaining TableAuthManager, zk found null.");
  }

  tableAcls = new MapMaker().weakValues().makeMap();
}
 
Author: grokcoder, Project: pbase, Lines: 55, Source: AccessController.java

Example 9: start

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
public void start(CoprocessorEnvironment env) throws IOException {
  CompoundConfiguration conf = new CompoundConfiguration();
  conf.add(env.getConfiguration());

  shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
    AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);

  cellFeaturesEnabled = HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS;
  if (!cellFeaturesEnabled) {
    LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
        + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY
        + " accordingly.");
  }

  ZooKeeperWatcher zk = null;
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    zk = mEnv.getMasterServices().getZooKeeper();
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
    zk = rsEnv.getRegionServerServices().getZooKeeper();
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    conf.addStringMap(regionEnv.getRegion().getTableDesc().getConfiguration());
    zk = regionEnv.getRegionServerServices().getZooKeeper();
    compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
      AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
  }

  // set the user-provider.
  this.userProvider = UserProvider.instantiate(env.getConfiguration());

  // If zk is null or IOException while obtaining auth manager,
  // throw RuntimeException so that the coprocessor is unloaded.
  if (zk != null) {
    try {
      this.authManager = TableAuthManager.get(zk, env.getConfiguration());
    } catch (IOException ioe) {
      throw new RuntimeException("Error obtaining TableAuthManager", ioe);
    }
  } else {
    throw new RuntimeException("Error obtaining TableAuthManager, zk found null.");
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 47, Source: AccessController.java

Example 10: replay

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Replay the given changes when distributedLogReplay replays WAL edits from a failed RS. The
 * guarantee is that the given mutations will be durable on the receiving RS if this method
 * returns without any exception.
 * @param controller the RPC controller
 * @param request the request
 * @throws ServiceException if an IOException occurs while replaying the edits
 */
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller,
    final ReplicateWALEntryRequest request) throws ServiceException {
  long before = EnvironmentEdgeManager.currentTimeMillis();
  CellScanner cells = ((PayloadCarryingRpcController) controller).cellScanner();
  try {
    checkOpen();
    List<WALEntry> entries = request.getEntryList();
    if (entries == null || entries.isEmpty()) {
      // empty input
      return ReplicateWALEntryResponse.newBuilder().build();
    }
    HRegion region = this.getRegionByEncodedName(
      entries.get(0).getKey().getEncodedRegionName().toStringUtf8());
    RegionCoprocessorHost coprocessorHost = region.getCoprocessorHost();
    List<Pair<HLogKey, WALEdit>> walEntries = new ArrayList<Pair<HLogKey, WALEdit>>();
    List<HLogSplitter.MutationReplay> mutations = new ArrayList<HLogSplitter.MutationReplay>();
    // when tags are enabled, we need to tag replay edits with the log sequence number
    boolean needAddReplayTag = (HFile.getFormatVersion(this.conf) >= 3);
    for (WALEntry entry : entries) {
      if (nonceManager != null) {
        long nonceGroup = entry.getKey().hasNonceGroup()
            ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
        long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
        nonceManager.reportOperationFromWal(nonceGroup, nonce, entry.getKey().getWriteTime());
      }
      Pair<HLogKey, WALEdit> walEntry = (coprocessorHost == null) ? null :
        new Pair<HLogKey, WALEdit>();
      List<HLogSplitter.MutationReplay> edits = HLogSplitter.getMutationsFromWALEntry(entry,
        cells, walEntry, needAddReplayTag);
      if (coprocessorHost != null) {
        // Start coprocessor replay here. The coprocessor is for each WALEdit instead of a
        // KeyValue.
        if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(),
          walEntry.getSecond())) {
          // if bypass this log entry, ignore it ...
          continue;
        }
        walEntries.add(walEntry);
      }
      mutations.addAll(edits);
    }

    if (!mutations.isEmpty()) {
      OperationStatus[] result = doReplayBatchOp(region, mutations);
      // check if it's a partial success
      for (int i = 0; result != null && i < result.length; i++) {
        if (result[i] != OperationStatus.SUCCESS) {
          throw new IOException(result[i].getExceptionMsg());
        }
      }
    }
    if (coprocessorHost != null) {
      for (Pair<HLogKey, WALEdit> wal : walEntries) {
        coprocessorHost.postWALRestore(region.getRegionInfo(), wal.getFirst(),
          wal.getSecond());
      }
    }
    return ReplicateWALEntryResponse.newBuilder().build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  } finally {
    metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTimeMillis() - before);
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 75, Source: HRegionServer.java


Note: The org.apache.hadoop.hbase.io.hfile.HFile.getFormatVersion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to each project's license. Do not republish without permission.