当前位置: 首页>>代码示例>>Java>>正文


Java DataNodeMetrics类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics的典型用法代码示例。如果您正苦于以下问题:Java DataNodeMetrics类的具体用法?Java DataNodeMetrics怎么用?Java DataNodeMetrics使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


DataNodeMetrics类属于org.apache.hadoop.hdfs.server.datanode.metrics包,在下文中一共展示了DataNodeMetrics类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: setupMocks

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
@Before
public void setupMocks() throws Exception {
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);

  // Build a bare-bones mock DataNode: just enough configuration and
  // collaborators for a BPOfferService to run against it.
  mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();

  Configuration configuration = new Configuration();
  File dataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
  configuration.set(DFS_DATANODE_DATA_DIR_KEY, dataDir.toURI().toString());

  Mockito.doReturn(configuration).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(configuration)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(configuration, "fake dn"))
      .when(mockDn).getMetrics();

  // Back the mock DN with a simulated dataset holding our fake block pool.
  mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, configuration));
  mockFSDataset.addBlockPool(FAKE_BPID, configuration);
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:TestBPOfferService.java

示例2: initGlobalSetting

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Initialize global settings for DN.
 *
 * Sets up the state shared by all block pools on this datanode: storage,
 * configuration, the MXBean, the data-transfer / info / IPC servers,
 * metrics, and the thread-liveness reporter.
 *
 * @param conf     the datanode configuration
 * @param dataDirs the directories used for block storage
 * @throws IOException if storage or any server fails to initialize
 */
protected void initGlobalSetting(Configuration conf,
    AbstractList<File> dataDirs) throws IOException {
  this.dataDirs = dataDirs;
  this.conf = conf;
  storage = new DataStorage(this);
  
  // global DN settings
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  initIpcServer(conf);

  // Metrics are keyed by this datanode's storage ID.
  myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
  setCountingLoggers(myMetrics);
  // Flags a DN thread as dead when it has been inactive longer than the
  // configured threshold (default 240 * 1000 ms = 240 s).
  threadLivenessReporter = new DatanodeThreadLivenessReporter(conf.getLong(
      "dfs.datanode.thread.liveness.threshold", 240 * 1000),
      myMetrics.threadActiveness);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:23,代码来源:DataNode.java

示例3: setupMocks

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
@Before
public void setupMocks() throws Exception {
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);

  // Build a bare-bones mock DataNode: just enough configuration and
  // collaborators for a BPOfferService to run against it.
  mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();

  Configuration configuration = new Configuration();
  File dataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
  configuration.set(DFS_DATANODE_DATA_DIR_KEY, dataDir.toURI().toString());

  Mockito.doReturn(configuration).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(configuration)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(configuration, "fake dn"))
      .when(mockDn).getMetrics();

  // Back the mock DN with a simulated dataset holding our fake block pool.
  mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, null, configuration));
  mockFSDataset.addBlockPool(FAKE_BPID, configuration);
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:25,代码来源:TestBPOfferService.java

示例4: testDataNodeMetrics

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Writes a file larger than 2 GB and verifies that the datanode's
 * bytesWritten metric reflects the full file length (guards against
 * int overflow in the metric accounting).
 */
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Simulated storage so the >2 GB file needs no real disk space.
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Deliberately one byte past Integer.MAX_VALUE.
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // FIX: JUnit's assertEquals takes (expected, actual); the original
    // call had the arguments swapped, which garbles the failure message.
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:19,代码来源:TestDataNodeMetrics.java

示例5: initGlobalSetting

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Initialize global settings for DN.
 *
 * Sets up state shared by all block pools on this datanode: storage,
 * configuration, the MXBean, the data-transfer / info / IPC servers,
 * and metrics.
 *
 * @param conf     the datanode configuration
 * @param dataDirs the directories used for block storage
 * @throws IOException if storage or any server fails to initialize
 */
protected void initGlobalSetting(Configuration conf,
    AbstractList<File> dataDirs) throws IOException {
  this.dataDirs = dataDirs;
  this.conf = conf;
  storage = new DataStorage(this);
  
  // global DN settings
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  initIpcServer(conf);

  // Metrics are keyed by this datanode's storage ID.
  myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:19,代码来源:DataNode.java

示例6: testDataNodeMetrics

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Writes a file larger than 2 GB and verifies that the datanode's
 * bytesWritten metric reflects the full file length (guards against
 * int overflow in the metric accounting).
 */
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new Configuration();
  // Simulated storage so the >2 GB file needs no real disk space.
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    FileSystem fs = cluster.getFileSystem();
    // Deliberately one byte past Integer.MAX_VALUE.
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // FIX: JUnit's assertEquals takes (expected, actual); the original
    // call had the arguments swapped, which garbles the failure message.
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:19,代码来源:TestDataNodeMetrics.java

示例7: testBPInitErrorHandling

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE; the
 * offer service must retry until initBlockPool succeeds.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  // Mock DN with just enough wiring for BPOfferService to run.
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
    new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
    when(mockDn).getMetrics();
  // Fail the FIRST initBlockPool call, succeed on every retry.
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // The initBlockPool is called again. Now mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  List<BPServiceActor> actors = bpos.getBPServiceActors();
  assertEquals(2, actors.size());
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Even if one actor's initialization fails, the other should still
    // finish its block report.
    waitForBlockReport(mockNN1, mockNN2);
  } finally {
    bpos.stop();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:42,代码来源:TestBPOfferService.java

示例8: testBPInitErrorHandling

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE; the
 * offer service must retry until initBlockPool succeeds.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  // Mock DN with just enough wiring for BPOfferService to run.
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
    new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
    when(mockDn).getMetrics();
  // Fail the FIRST initBlockPool call, succeed on every retry.
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // The initBlockPool is called again. Now mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  List<BPServiceActor> actors = bpos.getBPServiceActors();
  assertEquals(2, actors.size());
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Even if one actor's initialization fails, the other should still
    // finish its block report.
    waitForBlockReport(mockNN1, mockNN2);
  } finally {
    bpos.stop();
    // Wait for actor threads to exit before the test returns.
    bpos.join();
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:43,代码来源:TestBPOfferService.java

示例9: getMockDn

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Builds a mock DataNode whose configuration allows or forbids
 * non-local lazy-persist writes, per the given enum value.
 */
private static DataNode getMockDn(NonLocalLazyPersist nonLocalLazyPersist) {
  Configuration conf = new HdfsConfiguration();
  boolean allowNonLocal = (nonLocalLazyPersist == NonLocalLazyPersist.ALLOWED);
  conf.setBoolean(DFS_DATANODE_NON_LOCAL_LAZY_PERSIST, allowNonLocal);

  DataNode mockDn = mock(DataNode.class);
  when(mockDn.getConf()).thenReturn(conf);
  when(mockDn.getDnConf()).thenReturn(new DNConf(conf));
  when(mockDn.getMetrics()).thenReturn(mock(DataNodeMetrics.class));
  return mockDn;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:14,代码来源:TestDataXceiverLazyPersistHint.java

示例10: testStuckDataNode

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Creates a deliberately slow writer and checks that pipeline
 * heartbeats keep the write pipeline alive while the writer stalls.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout", timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

  // Spy on the first datanode's bytesWritten counter so every increment
  // is delayed, simulating a stuck datanode in the pipeline.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spiedCounter = spy(metrics.bytesWritten);
  DelayAnswer delay = new DelayAnswer();
  // NOTE(review): stubbed with anyInt(); if inc(...) takes a long,
  // confirm the matcher still applies — kept exactly as the original.
  doAnswer(delay).when(spiedCounter).inc(anyInt());
  metrics.bytesWritten = spiedCounter;

  try {
    // Write a two-byte file through the (delayed) pipeline.
    FSDataOutputStream out = fs.create(p);
    out.write(1);
    out.sync();
    out.write(2);
    out.close();

    // Verify the entire file arrived intact.
    FSDataInputStream in = fs.open(p);
    assertEquals(1, in.read());
    assertEquals(2, in.read());
    in.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:40,代码来源:TestStuckDataNode.java

示例11: startDataNode

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @param resources - secure resources needed to run under Kerberos;
 *  must be non-null when security is enabled (unless testing override)
 * @throws IOException if storage or any server fails to initialize
 */
void startDataNode(Configuration conf, 
                   AbstractList<File> dataDirs,
                  // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  // A secure cluster must be started with privileged resources unless
  // the testing-only escape hatch is set.
  if(UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  // One BPOfferService per namespace; discovered from the configuration.
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:49,代码来源:DataNode.java

示例12: startDataNode

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf
 *     - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 * @param dataDirs
 *     - only for a non-simulated storage data node
 * @param resources
 *     - secure resources needed to run under Kerberos; must be non-null
 *     when security is enabled (unless the testing override is set)
 * @throws IOException if storage or any server fails to initialize
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs,
    // DatanodeProtocol namenode,
    SecureResources resources) throws IOException {
  // A secure cluster must be started with privileged resources unless
  // the testing-only escape hatch is set.
  if (UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException(
          "Cannot start secure cluster without " + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  // One BPOfferService per namespace; discovered from the configuration.
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:48,代码来源:DataNode.java

示例13: testBPInitErrorHandling

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE; the
 * offer service must retry until initBlockPool succeeds.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  // Mock DN with just enough wiring for BPOfferService to run.
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
    new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
    when(mockDn).getMetrics();
  // Fail the FIRST initBlockPool call, succeed on every retry.
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // The initBlockPool is called again. Now mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    // This fork keeps a single actor; after init it must still be able
    // to complete a block report despite the earlier failure.
    List<BPServiceActor> actors = bpos.getBPServiceActors();
    assertEquals(1, actors.size());
    BPServiceActor actor = actors.get(0);
    waitForBlockReport(actor.getNameNodeProxy());
  } finally {
    bpos.stop();
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:41,代码来源:TestBPOfferService.java

示例14: startDataNode

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @param namenode - the namenode protocol handle this DN reports to
 * @param resources - secure resources needed to run under Kerberos;
 *  must be non-null when security is enabled
 * @throws IOException if storage or any server fails to initialize
 */
void startDataNode(Configuration conf, 
                   AbstractList<File> dataDirs,
                   DatanodeProtocol namenode, SecureResources resources
                   ) throws IOException {
  // A secure cluster cannot start without privileged resources.
  if(UserGroupInformation.isSecurityEnabled() && resources == null)
    throw new RuntimeException("Cannot start secure cluster without " +
    "privileged resources.");

  this.secureResources = resources;
  this.namenode = namenode;
  storage = new DataStorage();
  
  // Global DN settings: config, MXBean, transfer and info servers,
  // the FS dataset, and the block scanner.
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  initFsDataSet(conf, dataDirs);
  initBlockScanner(conf);
  startInfoServer(conf);

  // Metrics are keyed by this datanode's registration name.
  myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());
  // TODO check what code removed here

  initIpcServer(conf);
  startPlugins(conf);
  
  // BlockTokenSecretManager is created here, but it shouldn't be
  // used until it is initialized in register().
  this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:40,代码来源:DataNode.java

示例15: getMetrics

import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; //导入依赖的package包/类
/** Returns this datanode's metrics instance. */
public DataNodeMetrics getMetrics() {
  return metrics;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:4,代码来源:DataNode.java


注:本文中的org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。