

Java NameNodeMetrics Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics. If you are wondering what NameNodeMetrics is for, how to use it, or simply want to see it in real code, the curated examples below should help.


The NameNodeMetrics class belongs to the org.apache.hadoop.hdfs.server.namenode.metrics package. Fifteen code examples of the class are shown below, sorted by popularity by default.
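
Most of the examples below share one access pattern: obtain the NameNode's metrics object through the static NameNode.getNameNodeMetrics() call, null-check it (it may not be initialized yet, for example in unit tests or during early startup), and then record a value. The following minimal sketch distills that pattern from the examples that follow; the helper class and method names are illustrative only.

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.util.Time;

// Hypothetical helper showing the recurring "fetch, null-check, record" pattern.
public class MetricsUsageSketch {
  static void recordFsImageLoad(long loadStartMs) {
    long elapsedMs = Time.monotonicNow() - loadStartMs;
    NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
    // The metrics instance can be null (e.g. in tests), so always guard the call.
    if (nnMetrics != null) {
      nnMetrics.setFsImageLoadTime((int) elapsedMs);
    }
  }
}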

Example 1: JournalSet

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
JournalSet(Configuration conf, FSImage image, NNStorage storage,
    int numJournals, NameNodeMetrics metrics) {
  minimumNumberOfJournals
    = conf.getInt("dfs.name.edits.dir.minimum", 1);
  minimumNumberOfNonLocalJournals 
    = conf.getInt("dfs.name.edits.dir.minimum.nonlocal", 0);
  this.image = image;
  this.storage = storage;
  ThreadFactory namedThreadFactory =
      new ThreadFactoryBuilder()
          .setNameFormat("JournalSet Worker %d")
          .build();
  this.executor = Executors.newFixedThreadPool(numJournals,
      namedThreadFactory);
  this.metrics = metrics;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: JournalSet.java

Example 2: createJournal

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Construct a custom journal manager.
 * The class to construct is taken from the configuration.
 * @param uri Uri to construct
 * @return The constructed journal manager
 * @throws IllegalArgumentException if no class is configured for uri
 */
public static JournalManager createJournal(Configuration conf, URI uri,
    NamespaceInfo nsInfo, NameNodeMetrics metrics) {
  Class<? extends JournalManager> clazz = getJournalClass(conf,
      uri.getScheme());

  try {
    Constructor<? extends JournalManager> cons = clazz.getConstructor(
        Configuration.class, URI.class, NamespaceInfo.class,
        NameNodeMetrics.class);
    return cons.newInstance(conf, uri, nsInfo, metrics);
  } catch (Exception e) {
    throw new IllegalArgumentException("Unable to construct journal, " + uri,
        e);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 23, Source: FSEditLog.java
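
The pattern in createJournal (read an implementation class from configuration, look up a constructor with a known signature, and instantiate it reflectively) is plain Java and works outside Hadoop as well. Below is a standalone sketch of the same idea; the Plugin interface and the plugin.class.<scheme> property key are illustrative assumptions, not Hadoop identifiers.

import java.lang.reflect.Constructor;
import java.net.URI;
import java.util.Properties;

public class PluginFactorySketch {
  public interface Plugin { }

  public static Plugin create(Properties conf, URI uri) {
    // Resolve the implementation class from a per-scheme property,
    // e.g. "plugin.class.hdfs" -> "com.example.HdfsPlugin" (hypothetical names).
    String className = conf.getProperty("plugin.class." + uri.getScheme());
    if (className == null) {
      throw new IllegalArgumentException("No class configured for " + uri);
    }
    try {
      Class<? extends Plugin> clazz =
          Class.forName(className).asSubclass(Plugin.class);
      // Look up the expected constructor signature and invoke it.
      Constructor<? extends Plugin> cons = clazz.getConstructor(URI.class);
      return cons.newInstance(uri);
    } catch (Exception e) {
      // As in createJournal: wrap any reflection failure in IllegalArgumentException.
      throw new IllegalArgumentException("Unable to construct plugin, " + uri, e);
    }
  }
}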

Example 3: loadFromDisk

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 *             from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
public static FSNamesystem loadFromDisk(Configuration conf)
    throws IOException {

  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = now();
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  namesystem.loadFSImage(startOpt, fsImage,
    HAUtil.isHAEnabled(conf, nameserviceId));
  long timeTakenToLoadFSImage = now() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 35, Source: FSNamesystem.java

Example 4: loadFromDisk

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf
 *     the Configuration which specifies the storage directories
 *     from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException
 *     if loading fails
 */
public static FSNamesystem loadFromDisk(Configuration conf, NameNode namenode)
    throws IOException {

  FSNamesystem namesystem = new FSNamesystem(conf, namenode);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = now();

  namesystem.dir
      .imageLoadComplete();     //HOP: this function was called inside the  namesystem.loadFSImage(...) which is commented out

  long timeTakenToLoadFSImage = now() - loadStart;
  LOG.info(
      "Finished loading FSImage in " + timeTakenToLoadFSImage + " ms");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
 
Developer: hopshadoop, Project: hops, Lines: 35, Source: FSNamesystem.java

Example 5: loadFromDisk

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 *             from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {

  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = monotonicNow();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = monotonicNow() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: FSNamesystem.java

Example 6: processCacheReport

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode = 
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isAlive) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, " +
      "processing time: {} msecs", datanodeID, blockIds.size(), 
      (endTime - startTime));
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: CacheManager.java

Example 7: testAutoSync

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
 * logSync isn't called periodically, the edit log will sync itself.
 */
@Test
public void testAutoSync() throws Exception {
  File logDir = new File(TEST_DIR, "testAutoSync");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  
  String oneKB = StringUtils.byteToHexString(
      new byte[500]);
  
  try {
    log.openForWrite();
    NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
    log.setMetricsForTests(mockMetrics);

    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    // After ~400KB, we're still within the 512KB buffer size
    Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
    
    // After ~400KB more, we should have done an automatic sync
    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());

  } finally {
    log.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestEditLog.java
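
A note on the numbers in this test: StringUtils.byteToHexString(new byte[500]) yields a string of about 1,000 characters, so each logDelete call writes on the order of 1 KB of edit data. The first 400 operations therefore stay under the 512 KB output buffer and no sync is expected; after 400 more (roughly 800 KB in total), the buffer must have flushed automatically at least once, which is exactly what the single addSync verification asserts.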

Example 8: loadFromDisk

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 *             from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {

  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = monotonicNow();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = monotonicNow() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime());
  return namesystem;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: FSNamesystem.java

Example 9: processCacheReport

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode = 
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isRegistered()) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, " +
      "processing time: {} msecs", datanodeID, blockIds.size(), 
      (endTime - startTime));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: CacheManager.java

Example 10: processQueue

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
private void processQueue() {
  while (namesystem.isRunning()) {
    NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
    try {
      Runnable action = queue.take();
      // batch as many operations in the write lock until the queue
      // runs dry, or the max lock hold is reached.
      int processed = 0;
      namesystem.writeLock();
      metrics.setBlockOpsQueued(queue.size() + 1);
      try {
        long start = Time.monotonicNow();
        do {
          processed++;
          action.run();
          if (Time.monotonicNow() - start > MAX_LOCK_HOLD_MS) {
            break;
          }
          action = queue.poll();
        } while (action != null);
      } finally {
        namesystem.writeUnlock();
        metrics.addBlockOpsBatched(processed - 1);
      }
    } catch (InterruptedException e) {
      // ignore unless thread was specifically interrupted.
      if (Thread.interrupted()) {
        break;
      }
    }
  }
  queue.clear();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 34, Source: BlockManager.java
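
The loop in processQueue batches as many queued operations as possible under one write lock, but gives the lock up once a maximum hold time has elapsed. The following simplified, standalone sketch illustrates the same batching idea with a plain BlockingQueue and ReentrantLock; the 4 ms cap and class name are illustrative assumptions.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.locks.ReentrantLock;

public class LockBatcherSketch {
  private static final long MAX_LOCK_HOLD_MS = 4; // hypothetical cap
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  private final ReentrantLock lock = new ReentrantLock();
  private volatile boolean running = true;

  void processQueue() throws InterruptedException {
    while (running) {
      // Block until at least one operation is available.
      Runnable action = queue.take();
      lock.lock();
      try {
        long start = System.nanoTime();
        do {
          action.run();
          // Stop batching once the lock has been held long enough.
          if ((System.nanoTime() - start) / 1_000_000L > MAX_LOCK_HOLD_MS) {
            break;
          }
          action = queue.poll(); // non-blocking: drain until the queue runs dry
        } while (action != null);
      } finally {
        lock.unlock();
      }
    }
  }
}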

Example 11: testAutoSync

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
 * logSync isn't called periodically, the edit log will sync itself.
 */
@Test
public void testAutoSync() throws Exception {
  File logDir = new File(TEST_DIR, "testAutoSync");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  
  String oneKB = StringUtils.byteToHexString(
      new byte[500]);
  
  try {
    log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
    log.setMetricsForTests(mockMetrics);

    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    // After ~400KB, we're still within the 512KB buffer size
    Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
    
    // After ~400KB more, we should have done an automatic sync
    for (int i = 0; i < 400; i++) {
      log.logDelete(oneKB, 1L, false);
    }
    Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());

  } finally {
    log.close();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 35, Source: TestEditLog.java

Example 12: loadFromDisk

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 *             from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {

  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = now();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = now() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 38, Source: FSNamesystem.java

Example 13: ImageSet

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
public ImageSet(FSImage fsImage, Collection<URI> fsDirs,
    Collection<URI> fsEditsDirs, NameNodeMetrics metrics) throws IOException {
  this.imageManagers = new ArrayList<ImageManager>();
  this.metrics = metrics;

  // get all IMAGE directories
  Iterator<StorageDirectory> it = fsImage.storage
      .dirIterator(NameNodeDirType.IMAGE);
  while (it.hasNext()) {
    StorageDirectory sd = it.next();
    validate(sd.getRoot(), fsDirs);
    imageManagers.add(new FileImageManager(sd, fsImage.storage));
  }
  
  // add all journal managers that store images
  List<JournalManager> nonFileJournalManagers = fsImage.editLog.getNonFileJournalManagers();
  for (JournalManager jm : nonFileJournalManagers) {
    if (jm instanceof ImageManager && jm.hasImageStorage()) {
      ImageManager im = (ImageManager) jm;
      validate(im.getURI(), fsDirs);
      imageManagers.add(im);
    }
  }
  
  // initialize metrics
  updateImageMetrics();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 28, Source: ImageSet.java

Example 14: initialize

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
/**
 * Initialize name-node.
 *
 */
protected void initialize() throws IOException {
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider =
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class),
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // This is a check that the port is free
  // create a socket and bind to it, throw exception if port is busy
  // This has to be done before we are reading Namesystem not to waste time and fail fast
  NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getHttpServerAddress(getConf()));

  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);

  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 39, Source: NameNode.java

Example 15: processCacheReport

import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; // import the required package/class
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode = 
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isAlive) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processed cache report from "
        + datanodeID + ", blocks: " + blockIds.size()
        + ", processing time: " + (endTime - startTime) + " msecs");
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 31, Source: CacheManager.java


Note: The org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors; please consult each project's License before distributing or using the code. Do not reproduce without permission.