This page collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics. If you are unsure what DataNodeMetrics does or how to use it, the curated code examples below should help.
The DataNodeMetrics class belongs to the org.apache.hadoop.hdfs.server.datanode.metrics package. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: setupMocks
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
@Before
public void setupMocks() throws Exception {
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);

  // Set up a mock DN with the bare-bones configuration
  // objects, etc.
  mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();

  // Set up a simulated dataset with our fake BP
  mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
  mockFSDataset.addBlockPool(FAKE_BPID, conf);

  // Wire the dataset to the DN.
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
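Note that the mocked DataNode above returns a real DataNodeMetrics instance rather than a mock, so counters actually accumulate during the test. A minimal standalone sketch of that lifecycle (assuming the metrics2-era API used above; the shutdown() call for unregistering the source is an assumption based on how the DataNode itself cleans up):

Configuration conf = new Configuration();
DataNodeMetrics metrics = DataNodeMetrics.create(conf, "test-dn");
// ... run code that updates the counters ...
metrics.shutdown(); // unregister from the metrics system when done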
Example 2: initGlobalSetting
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * Initialize global settings for the DN.
 */
protected void initGlobalSetting(Configuration conf,
    AbstractList<File> dataDirs) throws IOException {
  this.dataDirs = dataDirs;
  this.conf = conf;
  storage = new DataStorage(this);

  // global DN settings
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  initIpcServer(conf);
  myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
  setCountingLoggers(myMetrics);
  threadLivenessReporter = new DatanodeThreadLivenessReporter(conf.getLong(
      "dfs.datanode.thread.liveness.threshold", 240 * 1000),
      myMetrics.threadActiveness);
}
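The liveness reporter above defaults to a 240-second threshold (240 * 1000 ms) read straight from the configuration, so a test or deployment can override it. A minimal sketch using the key exactly as it appears in the method:

Configuration conf = new Configuration();
// Lower the thread-liveness threshold from the 240 s default to 60 s.
conf.setLong("dfs.datanode.thread.liveness.threshold", 60 * 1000);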
Example 3: setupMocks
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
@Before
public void setupMocks() throws Exception {
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);

  // Set up a mock DN with the bare-bones configuration
  // objects, etc.
  mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();

  // Set up a simulated dataset with our fake BP
  mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, null, conf));
  mockFSDataset.addBlockPool(FAKE_BPID, conf);

  // Wire the dataset to the DN.
  Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
Example 4: testDataNodeMetrics
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
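This example reads the counter through the old metrics field directly. On branches that use the metrics2 framework, the equivalent check is usually written with the MetricsAsserts test helper instead; a sketch, assuming org.apache.hadoop.test.MetricsAsserts and org.apache.hadoop.metrics2.MetricsRecordBuilder are on the test classpath:

// Look up the registered metrics record for this DataNode by name,
// then assert on the BytesWritten counter.
MetricsRecordBuilder rb = MetricsAsserts.getMetrics(datanode.getMetrics().name());
MetricsAsserts.assertCounter("BytesWritten", LONG_FILE_LEN, rb);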
Example 5: initGlobalSetting
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * Initialize global settings for the DN.
 */
protected void initGlobalSetting(Configuration conf,
    AbstractList<File> dataDirs) throws IOException {
  this.dataDirs = dataDirs;
  this.conf = conf;
  storage = new DataStorage(this);

  // global DN settings
  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  initIpcServer(conf);
  myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
}
Example 6: testDataNodeMetrics
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 7: testBPInitErrorHandling
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
      new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // initBlockPool is called again; now the mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));

  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  List<BPServiceActor> actors = bpos.getBPServiceActors();
  assertEquals(2, actors.size());

  bpos.start();
  try {
    waitForInitialization(bpos);
    // Even if one of the actor initializations fails, the other one
    // should still finish the block report.
    waitForBlockReport(mockNN1, mockNN2);
  } finally {
    bpos.stop();
  }
}
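The doAnswer above implements a common fault-injection pattern: fail the first call, then succeed on retries. The same pattern distilled into a standalone sketch with a Java 8 lambda (assumes Mockito 2+, where Answer is a functional interface; the stubbed method is the same initBlockPool as above):

final AtomicInteger calls = new AtomicInteger();
Mockito.doAnswer(invocation -> {
  if (calls.getAndIncrement() == 0) {
    throw new IOException("injected failure on first call");
  }
  return null; // subsequent calls succeed
}).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));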
Example 8: testBPInitErrorHandling
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
      new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // initBlockPool is called again; now the mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));

  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  List<BPServiceActor> actors = bpos.getBPServiceActors();
  assertEquals(2, actors.size());

  bpos.start();
  try {
    waitForInitialization(bpos);
    // Even if one of the actor initializations fails, the other one
    // should still finish the block report.
    waitForBlockReport(mockNN1, mockNN2);
  } finally {
    bpos.stop();
    bpos.join();
  }
}
Example 9: getMockDn
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
private static DataNode getMockDn(NonLocalLazyPersist nonLocalLazyPersist) {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFS_DATANODE_NON_LOCAL_LAZY_PERSIST,
      nonLocalLazyPersist == NonLocalLazyPersist.ALLOWED);
  DNConf dnConf = new DNConf(conf);
  DataNodeMetrics mockMetrics = mock(DataNodeMetrics.class);
  DataNode mockDn = mock(DataNode.class);
  when(mockDn.getDnConf()).thenReturn(dnConf);
  when(mockDn.getConf()).thenReturn(conf);
  when(mockDn.getMetrics()).thenReturn(mockMetrics);
  return mockDn;
}
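A hypothetical caller of this helper, for illustration only (NonLocalLazyPersist is assumed to be a test-local enum with at least an ALLOWED value, as implied by the comparison above):

DataNode dn = getMockDn(NonLocalLazyPersist.ALLOWED);
// The canned Configuration carries the lazy-persist flag set above.
assertTrue(dn.getConf().getBoolean(DFS_DATANODE_NON_LOCAL_LAZY_PERSIST, false));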
Example 10: testStuckDataNode
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/** This creates a slow writer and checks to see
 * if pipeline heartbeats work fine.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout", timeout);
  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

  // Replace the first DN's bytesWritten metric with a spy whose inc()
  // is delayed, simulating a stuck DataNode.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    stm.write(1);
    stm.sync();
    stm.write(2);
    stm.close();

    // verify that the entire file is good
    FSDataInputStream in = fs.open(p);
    assertEquals(1, in.read());
    assertEquals(2, in.read());
    in.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
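DelayAnswer is a test helper that is referenced but not shown here; Hadoop's test tree ships a richer version in GenericTestUtils. A simplified sketch of the idea (an assumption about its shape, not the actual implementation): an Answer that holds the stubbed call until the test releases it, so the write pipeline stalls on the metric update.

private static class DelayAnswer implements Answer<Object> {
  private final CountDownLatch latch = new CountDownLatch(1);

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    latch.await(10, TimeUnit.SECONDS); // hold the call until released (or time out)
    return invocation.callRealMethod();
  }

  void proceed() {
    latch.countDown(); // let the held call continue
  }
}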
Example 11: startDataNode
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf,
                   AbstractList<File> dataDirs,
                   // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  if (UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  storage = new DataStorage();

  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  // BlockPoolTokenSecretManager is required to create the ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shut down its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
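The privileged-resource check at the top of this method can be bypassed in tests through a configuration key that appears verbatim in the code; a sketch:

Configuration conf = new HdfsConfiguration();
// Test-only escape hatch: allow a secure cluster to start without
// privileged (low-numbered) ports.
conf.setBoolean("ignore.secure.ports.for.testing", true);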
Example 12: startDataNode
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf
 *     - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 * @param dataDirs
 *     - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs,
    // DatanodeProtocol namenode,
    SecureResources resources) throws IOException {
  if (UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException(
          "Cannot start secure cluster without " + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  storage = new DataStorage();

  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  // BlockPoolTokenSecretManager is required to create the ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shut down its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
Example 13: testBPInitErrorHandling
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause an NPE.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
  final DataNode mockDn = Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf = new Configuration();
  File dnDataDir = new File(
      new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
      .when(mockDn).getMetrics();
  final AtomicInteger count = new AtomicInteger();
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // initBlockPool is called again; now the mock init is done.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));

  BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    List<BPServiceActor> actors = bpos.getBPServiceActors();
    assertEquals(1, actors.size());
    BPServiceActor actor = actors.get(0);
    waitForBlockReport(actor.getNameNodeProxy());
  } finally {
    bpos.stop();
  }
}
Example 14: startDataNode
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf,
                   AbstractList<File> dataDirs,
                   DatanodeProtocol namenode, SecureResources resources
                   ) throws IOException {
  if (UserGroupInformation.isSecurityEnabled() && resources == null)
    throw new RuntimeException("Cannot start secure cluster without " +
        "privileged resources.");

  this.secureResources = resources;
  this.namenode = namenode;
  storage = new DataStorage();

  initConfig(conf);
  registerMXBean();
  initDataXceiver(conf);
  initFsDataSet(conf, dataDirs);
  initBlockScanner(conf);
  startInfoServer(conf);
  myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());
  // TODO: check what code was removed here
  initIpcServer(conf);
  startPlugins(conf);

  // BlockTokenSecretManager is created here, but it shouldn't be
  // used until it is initialized in register().
  this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);
}
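Across the examples on this page, two DataNodeMetrics construction styles appear, tracking the move from the older metrics framework to metrics2. Side by side (both lines are taken verbatim from the methods above):

// Older branches construct the class directly, keyed by registration name:
myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());
// Newer branches go through the static factory instead:
metrics = DataNodeMetrics.create(conf, getDisplayName());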
Example 15: getMetrics
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; // import the required package/class
public DataNodeMetrics getMetrics() {
  return metrics;
}