This article collects typical usage examples of the Java method org.apache.hadoop.test.GenericTestUtils.waitFor. If you are wondering exactly what GenericTestUtils.waitFor does and how to use it, the curated code samples below should help; you can also read further about the enclosing class, org.apache.hadoop.test.GenericTestUtils.
The following 15 code examples of GenericTestUtils.waitFor are shown, sorted by popularity by default.
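Before the examples, here is a minimal, self-contained sketch of the pattern they all share: poll a Supplier&lt;Boolean&gt; until it returns true, or fail with a TimeoutException. It assumes the Guava com.google.common.base.Supplier used by Hadoop 2.x test code (later Hadoop versions accept java.util.function.Supplier instead); the WaitForExample class and the background thread are illustrative only, not part of Hadoop.

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    final AtomicBoolean done = new AtomicBoolean(false);

    // Simulate asynchronous work that eventually satisfies the condition
    // (hypothetical stand-in for a DataNode heartbeat, edit-log roll, etc.).
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {
          // ignored for this sketch
        }
        done.set(true);
      }
    }).start();

    // Re-check the condition every 100 ms; give up (and throw
    // TimeoutException) if it is still false after 10 seconds.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return done.get();
      }
    }, 100, 10000);
  }
}

The second argument is the polling interval in milliseconds and the third is the overall timeout; every example below follows this shape, differing only in the condition being checked.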
Example 1: testConfIsUnsetAsync
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Ensure that {@link ReconfigurableBase#startReconfigurationTask} unsets the
 * property in its parent's configuration when the new value is null.
 * @throws IOException
 */
@Test(timeout=300000)
public void testConfIsUnsetAsync() throws ReconfigurationException,
    IOException, TimeoutException, InterruptedException {
  final String property = "FOO";
  final String value1 = "value1";
  final Configuration conf = new Configuration();
  conf.set(property, value1);
  final Configuration newConf = new Configuration();
  final ReconfigurableBase reconfigurable = makeReconfigurable(
      conf, newConf, Arrays.asList(property));
  // Kick off a reconfiguration task and wait until it completes.
  reconfigurable.startReconfigurationTask();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return reconfigurable.getReconfigurationTaskStatus().stopped();
    }
  }, 100, 60000);
  assertNull(reconfigurable.getConf().get(property));
}
Example 2: testUncacheUnknownBlock
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a file
  Path fileName = new Path("/testUncacheUnknownBlock");
  int fileLen = 4096;
  DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Try to uncache it without caching it first
  setHeartbeatResponse(uncacheBlocks(locs));
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
Example 3: schedule
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
public List<TaskAttemptContainerAssignedEvent> schedule()
    throws Exception {
  // Before heartbeating with the RM, drain all outstanding events to
  // ensure all requests made before this heartbeat are handled.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      return eventQueue.isEmpty();
    }
  }, 100, 10000);
  // run the scheduler
  try {
    super.heartbeat();
  } catch (Exception e) {
    LOG.error("error in heartbeat ", e);
    throw new YarnRuntimeException(e);
  }
  List<TaskAttemptContainerAssignedEvent> result
      = new ArrayList<TaskAttemptContainerAssignedEvent>(events);
  events.clear();
  return result;
}
Example 4: testEditLogAutoroll
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
@Test(timeout=60000)
public void testEditLogAutoroll() throws Exception {
  // Make some edits
  final long startTxId = editLog.getCurSegmentTxId();
  for (int i = 0; i < 11; i++) {
    fs.mkdirs(new Path("testEditLogAutoroll-" + i));
  }
  // Wait for the NN to autoroll
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return editLog.getCurSegmentTxId() > startTxId;
    }
  }, 1000, 5000);
  // Transition to standby and make sure the roller stopped
  nn0.transitionToStandby();
  GenericTestUtils.assertNoThreadsMatching(
      ".*" + NameNodeEditLogRoller.class.getSimpleName() + ".*");
}
Example 5: waitForBlockReceived
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
      ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(fakeBlockPoolId),
            captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
Example 6: testSpaceReleasedOnUnexpectedEof
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Ensure that reserved space is released when the client goes away
 * unexpectedly.
 *
 * The verification is done for each replica in the write pipeline.
 *
 * @throws IOException
 */
@Test(timeout=300000)
public void testSpaceReleasedOnUnexpectedEof()
    throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");
  // Write 1 byte to the file and kill the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
  // Ensure all space reserved for the replica was released on each
  // DataNode.
  for (DataNode dn : cluster.getDataNodes()) {
    final FsVolumeImpl volume =
        (FsVolumeImpl) dn.getFSDataset().getVolumes().get(0);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return (volume.getReservedForRbw() == 0);
      }
    }, 500, Integer.MAX_VALUE); // Wait until the test times out.
  }
}
Example 7: waitForInitialization
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
private void waitForInitialization(final BPOfferService bpos)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return bpos.isAlive() && bpos.isInitialized();
    }
  }, 100, 10000);
}
Example 8: verifyExpectedCacheUsage
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Blocks until cache usage hits the expected new value.
 */
public static long verifyExpectedCacheUsage(final long expectedCacheUsed,
    final long expectedBlocks, final FsDatasetSpi<?> fsd) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    private int tries = 0;

    @Override
    public Boolean get() {
      long curCacheUsed = fsd.getCacheUsed();
      long curBlocks = fsd.getNumBlocksCached();
      if ((curCacheUsed != expectedCacheUsed) ||
          (curBlocks != expectedBlocks)) {
        if (tries++ > 10) {
          LOG.info("verifyExpectedCacheUsage: have " +
              curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
              curBlocks + "/" + expectedBlocks + " blocks cached. " +
              "memlock limit = " +
              NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
              ". Waiting...");
        }
        return false;
      }
      LOG.info("verifyExpectedCacheUsage: got " +
          curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
          curBlocks + "/" + expectedBlocks + " blocks cached. " +
          "memlock limit = " +
          NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
      return true;
    }
  }, 100, 60000);
  return expectedCacheUsed;
}
Example 9: testSlowReader
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Test for the case where the client begins to read a long block, but doesn't
 * read bytes off the stream quickly. The datanode should time out sending the
 * chunks and the transceiver should die, even if it has a long keepalive.
 */
@Test(timeout=300000)
public void testSlowReader() throws Exception {
  // Set a client socket cache expiry time much longer than
  // the datanode-side expiration time.
  final long CLIENT_EXPIRY_MS = 600000L;
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  clientConf.set(DFS_CLIENT_CONTEXT, "testSlowReader");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(),
          clientConf);
  // Restart the DN with a shorter write timeout.
  DataNodeProperties props = cluster.stopDataNode(0);
  props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      WRITE_TIMEOUT);
  props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      120000);
  assertTrue(cluster.restartDataNode(props, true));
  dn = cluster.getDataNodes().get(0);
  // Wait for heartbeats to avoid a startup race where we
  // try to write the block while the DN is still starting.
  cluster.triggerHeartbeats();
  DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
  FSDataInputStream stm = fs.open(TEST_FILE);
  stm.read();
  assertXceiverCount(1);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      // DN should time out in sendChunks, and this should force
      // the xceiver to exit.
      return getXceiverCountWithoutServer() == 0;
    }
  }, 500, 50000);
  IOUtils.closeStream(stm);
}
Example 10: testBNInSync
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
private void testBNInSync(MiniDFSCluster cluster, final BackupNode backup,
    int testIdx) throws Exception {
  final NameNode nn = cluster.getNameNode();
  final FileSystem fs = cluster.getFileSystem();
  // Do a bunch of namespace operations, make sure they're replicated
  // to the BN.
  for (int i = 0; i < 10; i++) {
    final String src = "/test_" + testIdx + "_" + i;
    LOG.info("Creating " + src + " on NN");
    Path p = new Path(src);
    assertTrue(fs.mkdirs(p));
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Checking for " + src + " on BN");
        try {
          boolean hasFile =
              backup.getNamesystem().getFileInfo(src, false) != null;
          boolean txnIdMatch =
              backup.getRpcServer().getTransactionID() ==
              nn.getRpcServer().getTransactionID();
          return hasFile && txnIdMatch;
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
    }, 30, 10000);
  }
  assertStorageDirsMatch(nn, backup);
}
Example 11: waitForHAState
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
private void waitForHAState(int nnidx, final HAServiceState state)
    throws TimeoutException, InterruptedException {
  final NameNode nn = cluster.getNameNode(nnidx);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        return nn.getRpcServer().getServiceStatus().getState() == state;
      } catch (Exception e) {
        e.printStackTrace();
        return false;
      }
    }
  }, 50, 15000);
}
Example 12: testNoPopulatingReplQueuesWhenExitingSafemode
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test
public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/test"), 15*BLOCK_SIZE, (short)3, 1L);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  // get some blocks in the SBN's image
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  // and some blocks in the edit logs
  DFSTestUtil.createFile(fs, new Path("/test2"), 15*BLOCK_SIZE, (short)3, 1L);
  nn0.getRpcServer().rollEditLog();
  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);
  //Configuration sbConf = cluster.getConfiguration(1);
  //sbConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 1);
  cluster.restartNameNode(1, false);
  nn1 = cluster.getNameNode(1);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return !nn1.isInSafeMode();
    }
  }, 100, 10000);
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L, nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L, nn1.getNamesystem().getPendingReplicationBlocks());
}
Example 13: waitForNNToIssueDeletions
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
/**
 * Wait for the NameNode to issue any deletions that are already
 * pending (i.e. for the pendingDeletionBlocksCount to go to 0)
 */
public static void waitForNNToIssueDeletions(final NameNode nn)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      LOG.info("Waiting for NN to issue block deletions to DNs");
      return nn.getNamesystem().getBlockManager()
          .getPendingDeletionBlocksCount() == 0;
    }
  }, 250, 10000);
}
Example 14: testCorruptBlockHandling
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
@Test(timeout=120000)
public void testCorruptBlockHandling() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
  conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
      TestScanResultHandler.class.getName());
  final TestContext ctx = new TestContext(conf, 1);
  final int NUM_EXPECTED_BLOCKS = 5;
  final int CORRUPT_INDEX = 3;
  ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 4);
  ExtendedBlock badBlock = ctx.getFileBlock(0, CORRUPT_INDEX);
  ctx.cluster.corruptBlockOnDataNodes(badBlock);
  final TestScanResultHandler.Info info =
      TestScanResultHandler.getInfo(ctx.volumes.get(0));
  synchronized (info) {
    info.shouldRun = true;
    info.notify();
  }
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      synchronized (info) {
        return info.blocksScanned == NUM_EXPECTED_BLOCKS;
      }
    }
  }, 3, 30000);
  synchronized (info) {
    assertTrue(info.badBlocks.contains(badBlock));
    for (int i = 0; i < NUM_EXPECTED_BLOCKS; i++) {
      if (i != CORRUPT_INDEX) {
        ExtendedBlock block = ctx.getFileBlock(0, i);
        assertTrue(info.goodBlocks.contains(block));
      }
    }
  }
  ctx.close();
}
Example 15: waitForTrueReplication
import org.apache.hadoop.test.GenericTestUtils; // import the package/class this method depends on
private void waitForTrueReplication(final MiniDFSCluster cluster,
    final ExtendedBlock block, final int waitFor) throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        return getTrueReplication(cluster, block) == waitFor;
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }, 500, 10000);
}