当前位置: 首页>>代码示例>>Java>>正文


Java Counter类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter的典型用法代码示例。如果您正苦于以下问题:Java Counter类的具体用法?Java Counter怎么用?Java Counter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


Counter类属于org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress包,在下文中一共展示了Counter类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: saveCurrentTokens

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Serializes every active delegation token (identifier plus renew date)
 * into the fsimage stream, reporting per-token progress to the NameNode
 * startup-progress tracker.
 *
 * @param out fsimage output stream to write the tokens to
 * @param sdPath storage directory path, used to label the progress step
 * @throws IOException if writing to the stream fails
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  // Leading count lets the loader know how many token records follow.
  out.writeInt(currentTokens.size());
  for (DelegationTokenIdentifier id : currentTokens.keySet()) {
    id.write(out);
    DelegationTokenInformation tokenInfo = currentTokens.get(id);
    out.writeLong(tokenInfo.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:DelegationTokenSecretManager.java

示例2: saveAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Private helper method to save all delegation keys in fsimage.
 *
 * @param out fsimage output stream to write the keys to
 * @param sdPath storage directory path, used to label the progress step
 * @throws IOException if writing to the stream fails
 */
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  // BUG FIX: the progress total must be the number of keys this method
  // writes (allKeys.size()), not the number of delegation tokens; the
  // original used currentTokens.size(), skewing startup-progress reporting.
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:DelegationTokenSecretManager.java

示例3: loadCurrentTokens

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Reads persisted delegation tokens back out of the fsimage stream and
 * re-registers each one, updating the startup-progress counter per token.
 *
 * @param in input stream positioned at the serialized token count
 * @throws IOException if reading from the stream fails
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  final int tokenCount = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, tokenCount);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  int remaining = tokenCount;
  while (remaining-- > 0) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    // Stream layout per token: identifier fields, then expiry time (long).
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:DelegationTokenSecretManager.java

示例4: loadAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Private helper method to load delegation keys from fsimage.
 *
 * @param in input stream positioned at the serialized key count
 * @throws IOException if reading from the stream fails
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  final int keyCount = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, keyCount);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  int remaining = keyCount;
  while (remaining-- > 0) {
    DelegationKey key = new DelegationKey();
    key.readFields(in);
    addKey(key);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:DelegationTokenSecretManager.java

示例5: loadDirectives

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Load cache directives from the fsimage, registering each directive under
 * its pool and updating startup progress as entries are read.
 *
 * @param in input stream positioned at the serialized directive count
 * @throws IOException if reading from the stream fails
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  final int directiveCount = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, directiveCount);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int n = 0; n < directiveCount; n++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // The pool name is the lookup key for resolving the pool reference.
    final String poolName = info.getPool();
    CacheDirective directive = new CacheDirective(
        info.getId(),
        info.getPath().toUri().getPath(),
        info.getReplication(),
        info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:CacheManager.java

示例6: loadLocalNameINodes

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Load fsimage files assuming only local names are stored. Used when
 * snapshots are not supported by the layout version.
 *
 * @param numFiles number of files expected to be read
 * @param in image input stream
 * @param counter Counter to increment for namenode startup progress
 * @throws IOException if more files are read than expected
 */
private void loadLocalNameINodes(long numFiles, DataInput in, Counter counter)
    throws IOException {
  assert NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, getLayoutVersion());
  assert numFiles > 0;

  // load root
  loadRoot(in, counter);
  // have loaded the first file (the root)
  numFiles--;

  // load rest of the nodes directory by directory
  while (numFiles > 0) {
    numFiles -= loadDirectory(in, counter);
  }
  if (numFiles != 0) {
    // loadDirectory consumed more entries than expected; numFiles is
    // negative here, so negate it for the message.
    // FIX: corrected "unexpect" -> "unexpected" in the error message.
    throw new IOException("Read unexpected number of files: " + -numFiles);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:FSImageFormat.java

示例7: saveChildren

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directory
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  // The leading count tells the loader how many child records follow.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      // Track under-construction files beneath snapshot-deleted paths —
      // presumably saved separately later; confirm against callers of
      // snapshotUCMap.
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    // Poll for cancellation every CHECK_CANCEL_INTERVAL children (the
    // counter persists across calls, so the interval spans directories).
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:35,代码来源:FSImageFormat.java

示例8: loadSecretManagerSection

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Reads the secret manager section (delegation keys followed by persisted
 * tokens) from the protobuf fsimage stream and hands the decoded state to
 * the namesystem.
 *
 * @param in fsimage input stream positioned at the section header
 * @param prog startup progress tracker to update while loading tokens
 * @param currentStep progress step under which token loading is reported
 * @throws IOException if reading the stream fails
 */
private void loadSecretManagerSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
  int numKeys = s.getNumKeys();
  int numTokens = s.getNumTokens();
  ArrayList<SecretManagerSection.DelegationKey> keys =
      Lists.newArrayListWithCapacity(numKeys);
  ArrayList<SecretManagerSection.PersistToken> tokens =
      Lists.newArrayListWithCapacity(numTokens);

  for (int k = 0; k < numKeys; k++) {
    keys.add(SecretManagerSection.DelegationKey.parseDelimitedFrom(in));
  }

  // Only the token-loading phase is reported as startup progress.
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int t = 0; t < numTokens; t++) {
    tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in));
    counter.increment();
  }

  fsn.loadSecretManagerState(s, keys, tokens);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:22,代码来源:FSImageFormatProtobuf.java

示例9: loadCacheManagerSection

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Loads the cache manager section (cache pools and directives) from the
 * protobuf fsimage stream, updating startup progress as each pool is read,
 * then installs the decoded state into the cache manager.
 *
 * @param in fsimage input stream positioned at the section header
 * @param prog startup progress tracker to update while loading pools
 * @param currentStep progress step under which pool loading is reported
 * @throws IOException if reading the stream fails
 */
private void loadCacheManagerSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(in);
  int numPools = s.getNumPools();
  // Hoisted into a local for consistency with numPools and to avoid
  // re-reading the proto field on every loop iteration.
  int numDirectives = s.getNumDirectives();
  ArrayList<CachePoolInfoProto> pools = Lists
      .newArrayListWithCapacity(numPools);
  ArrayList<CacheDirectiveInfoProto> directives = Lists
      .newArrayListWithCapacity(numDirectives);
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numPools; ++i) {
    pools.add(CachePoolInfoProto.parseDelimitedFrom(in));
    counter.increment();
  }
  for (int i = 0; i < numDirectives; ++i) {
    directives.add(CacheDirectiveInfoProto.parseDelimitedFrom(in));
  }
  fsn.getCacheManager().loadState(
      new CacheManager.PersistState(s, pools, directives));
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:20,代码来源:FSImageFormatProtobuf.java

示例10: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Loads the INode section of a protobuf fsimage: resets the last allocated
 * inode id, then reads every serialized INode, installing the root inode
 * specially and adding all others to the inode map.
 *
 * @param in fsimage input stream positioned at the section header
 * @param prog startup progress tracker to update per inode
 * @param currentStep progress step under which inode loading is reported
 * @throws IOException if reading the stream fails
 */
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  // FIX: use a long loop counter — numInodes is a long, and an int counter
  // would overflow for images holding more than Integer.MAX_VALUE inodes.
  for (long i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:20,代码来源:FSImageFormatPBINode.java

示例11: loadAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Private helper method to load delegation keys from fsimage.
 *
 * @param in input stream positioned at the serialized key count
 * @throws IOException if reading from the stream fails
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  // Report the expected key count so startup progress can show completion.
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:21,代码来源:DelegationTokenSecretManager.java

示例12: loadLocalNameINodes

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Load fsimage files assuming only local names are stored.
 *
 * @param numFiles number of files expected to be read
 * @param in image input stream
 * @param counter Counter to increment for namenode startup progress
 * @throws IOException if more files are read than expected
 */
private void loadLocalNameINodes(long numFiles, DataInput in, Counter counter)
    throws IOException {
  assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
      getLayoutVersion());
  assert numFiles > 0;

  // load root
  loadRoot(in, counter);
  // have loaded the first file (the root)
  numFiles--;

  // load rest of the nodes directory by directory
  while (numFiles > 0) {
    numFiles -= loadDirectory(in, counter);
  }
  if (numFiles != 0) {
    // loadDirectory consumed more entries than expected; numFiles is
    // negative here, so negate it for the message.
    // FIX: corrected "unexpect" -> "unexpected" in the error message.
    throw new IOException("Read unexpected number of files: " + -numFiles);
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:28,代码来源:FSImageFormat.java

示例13: saveChildren

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; //导入依赖的package包/类
/**
 * Writes each child INode to the image stream and counts how many of them
 * are directories.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directory
 */
private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out,
    Counter counter) throws IOException {
  // Record how many children follow, then serialize them in order.
  out.writeInt(children.size());
  int dirCount = 0;
  int processed = 0;
  for (INode child : children) {
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirCount++;
    }
    // Check for cancellation on the first child and every 50th thereafter.
    if (processed++ % 50 == 0) {
      context.checkCancelled();
    }
  }
  return dirCount;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:26,代码来源:FSImageFormat.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。