This article collects typical usage examples of the Java class org.apache.hadoop.fi.FiTestUtil. If you have been wondering what exactly the FiTestUtil class does, how to use it, or where to find real-world examples of it, the curated examples below should help.
The FiTestUtil class belongs to the org.apache.hadoop.fi package. A total of 15 code examples of the FiTestUtil class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
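FiTestUtil is the shared utility class behind Hadoop's AspectJ-based HDFS fault-injection (FI) tests: the examples below mostly use its static LOG, its thread-local RANDOM, and small helpers such as nextRandomInt and sleep, plus its nested Action/Constraint types. As a quick orientation, here is a minimal sketch of the class on its own (a sketch assuming the signatures found in the Hadoop FI test sources; everything other than the FiTestUtil members is made up for illustration):

import org.apache.hadoop.fi.FiTestUtil;

public class FiTestUtilSketch {
  public static void main(String[] args) {
    // The shared commons-logging Log used throughout the FI tests.
    FiTestUtil.LOG.info("Running " + FiTestUtil.getMethodName() + " ...");

    // nextRandomInt(min, max) draws from the half-open range [min, max),
    // backed by the thread-local FiTestUtil.RANDOM.
    final int n = FiTestUtil.nextRandomInt(1, 10);
    FiTestUtil.LOG.info("picked n=" + n + " (always between 1 and 9)");

    // sleep(min, max) pauses for a random duration within the range;
    // the FI tests use it to simulate slow datanodes.
    FiTestUtil.sleep(50, 100);
  }
}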
Example 1: run
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
@Override
public void run(NodeBytes nb) throws IOException {
  synchronized (rcv) {
    rcv.add(nb);
    for (NodeBytes n : rcv) {
      long counterPartsBytes = -1;
      NodeBytes counterPart = null;
      if (ack.size() > rcv.indexOf(n)) {
        counterPart = ack.get(rcv.indexOf(n));
        counterPartsBytes = counterPart.bytes;
      }
      assertTrue("FI: Wrong receiving length",
          counterPartsBytes <= n.bytes);
      if (FiTestUtil.LOG.isDebugEnabled()) {
        FiTestUtil.LOG.debug("FI: before compare of Recv bytes. Expected "
            + n.bytes + ", got " + counterPartsBytes);
      }
    }
  }
}
Example 2: run
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
@Override
public void run(NodeBytes nb) throws IOException {
  synchronized (rcv) {
    rcv.add(nb);
    for (NodeBytes n : rcv) {
      long counterPartsBytes = -1;
      NodeBytes counterPart = null;
      if (ack.size() > rcv.indexOf(n)) {
        counterPart = ack.get(rcv.indexOf(n));
        counterPartsBytes = counterPart.bytes;
      }
      assertTrue("FI: Wrong receiving length",
          counterPartsBytes <= n.bytes);
      if (FiTestUtil.LOG.isDebugEnabled()) {
        FiTestUtil.LOG.debug("FI: before compare of Recv bytes. Expected "
            + n.bytes + ", got " + counterPartsBytes);
      }
    }
  }
}
Example 3: run
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
/** {@inheritDoc} */
public void run(NodeBytes nb) throws IOException {
  synchronized (ack) {
    ack.add(nb);
    for (NodeBytes n : ack) {
      NodeBytes counterPart = null;
      long counterPartsBytes = -1;
      if (rcv.size() > ack.indexOf(n)) {
        counterPart = rcv.get(ack.indexOf(n));
        counterPartsBytes = counterPart.bytes;
      }
      assertTrue("FI: Wrong acknowledged length",
          counterPartsBytes == n.bytes);
      if (FiTestUtil.LOG.isDebugEnabled()) {
        FiTestUtil.LOG.debug("FI: before compare of Acked bytes. Expected "
            + n.bytes + ", got " + counterPartsBytes);
      }
    }
  }
}
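Taken together, examples 1-3 maintain two parallel lists: rcv records the byte counts reported on the receive path and ack those reported on the acknowledgement path. For the entry at a given index, the acknowledged length may lag behind the received length (hence the <= check on the receive side), but once an acknowledgement exists it must match its received counterpart exactly (hence the == check on the ack side). An illustrative trace with made-up numbers: if rcv = [1024, 2048] and ack = [1024], the second received entry has no ack counterpart yet, so counterPartsBytes stays -1 and the check -1 <= 2048 passes trivially; once the second ack arrives, it must equal 2048.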
Example 4: initLoggers
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void initLoggers() {
  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
Example 5: runSlowDatanodeTest
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runSlowDatanodeTest(String methodName, SleepAction a)
    throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiCallReceivePacket.set(a);
  t.fiReceiverOpWriteBlock.set(a);
  t.fiStatusRead.set(a);
  write1byte(methodName);
}
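Helpers like this one are driven by tiny @Test methods that construct the SleepAction with the target datanode index and a delay; a hypothetical caller might look like this (the test name and the 3-second delay are illustrative, not taken from the source above):

/** Hypothetical: datanode 0 sleeps 3 seconds in each hooked operation. */
@Test
public void pipeline_Fi_06() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  runSlowDatanodeTest(methodName, new SleepAction(methodName, 0, 3000));
}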
Example 6: runReceiverOpWriteBlockTest
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runReceiverOpWriteBlockTest(String methodName,
    int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiReceiverOpWriteBlock.set(a);
  t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName, errorIndex));
  write1byte(methodName);
  Assert.assertTrue(t.isSuccess());
}
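The same calling pattern fits this helper; for example, OomAction (one of the stock actions in org.apache.hadoop.fi.DataTransferTestUtil, which throws an OutOfMemoryError at the chosen datanode) makes a convenient injected fault. A hypothetical caller (the test name is illustrative):

/** Hypothetical: datanode 1 throws OutOfMemoryError while setting up the write pipeline. */
@Test
public void pipeline_Fi_02() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  runReceiverOpWriteBlockTest(methodName, 1, new OomAction(methodName, 1));
}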
Example 7: runStatusReadTest
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runStatusReadTest(String methodName, int errorIndex,
    Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiStatusRead.set(a);
  t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName, errorIndex));
  write1byte(methodName);
  Assert.assertTrue(t.isSuccess());
}
Example 8: runCallWritePacketToDisk
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runCallWritePacketToDisk(String methodName,
    int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiCallWritePacketToDisk.set(a);
  t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, errorIndex));
  write1byte(methodName);
  Assert.assertTrue(t.isSuccess());
}
Example 9: runPipelineCloseTest
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runPipelineCloseTest(String methodName,
    Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiPipelineClose.set(a);
  TestFiDataTransferProtocol.write1byte(methodName);
}
Example 10: runPipelineCloseAck
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runPipelineCloseAck(String name, int i, DataNodeAction a)
    throws IOException {
  FiTestUtil.LOG.info("Running " + name + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  final MarkerConstraint marker = new MarkerConstraint(name);
  t.fiPipelineClose.set(new DatanodeMarkingAction(name, i, marker));
  t.fiPipelineAck.set(new ConstraintSatisfactionAction<DatanodeID, IOException>(a, marker));
  TestFiDataTransferProtocol.write1byte(name);
}
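The marker/constraint pair above sequences two injected actions: DatanodeMarkingAction marks the constraint when the pipeline-close hook fires on datanode i, and ConstraintSatisfactionAction runs the wrapped action a only once that mark has been set. A stripped-down sketch of the same mechanism using the nested types from FiTestUtil directly (a sketch assuming the semantics in the Hadoop FI test sources; the println action is made up):

import java.io.IOException;
import org.apache.hadoop.fi.FiTestUtil;

public class MarkerDemo {
  public static void main(String[] args) throws IOException {
    final FiTestUtil.MarkerConstraint marker = new FiTestUtil.MarkerConstraint("demo");
    // The wrapped action fires only after marker.mark() has been called.
    final FiTestUtil.Action<String, IOException> guarded =
        new FiTestUtil.ConstraintSatisfactionAction<String, IOException>(
            new FiTestUtil.Action<String, IOException>() {
              @Override
              public void run(String s) { System.out.println("fired for " + s); }
            }, marker);
    guarded.run("first");   // no-op: the constraint is not yet satisfied
    marker.mark();          // satisfy the constraint
    guarded.run("second");  // now the wrapped action runs
  }
}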
Example 11: runBlockFileCloseTest
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private static void runBlockFileCloseTest(String methodName,
    Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiBlockFileClose.set(a);
  TestFiDataTransferProtocol.write1byte(methodName);
}
Example 12: writeSeveralPackets
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
/**
 * 1. Create a file with DFS.
 * 2. Write MIN_N_PACKET to MAX_N_PACKET packets.
 * 3. Close the file.
 * 4. Reopen the same file.
 * 5. Read the bytes back and compare the results.
 */
private static void writeSeveralPackets(String methodName) throws IOException {
  final Random r = FiTestUtil.RANDOM.get();
  final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
  final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
  final int size = (nPackets - 1) * PACKET_SIZE + lastPacketSize;
  FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
      + ", lastPacketSize=" + lastPacketSize);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION + 2).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);
    final long seed = r.nextLong();
    final Random ran = new Random(seed);
    ran.nextBytes(bytes);
    out.write(bytes, 0, size);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    int totalRead = 0;
    int nRead = 0;
    while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
      totalRead += nRead;
    }
    Assert.assertEquals("Cannot read file.", size, totalRead);
    for (int i = 0; i < size; i++) {
      Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
    }
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
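Two details are worth noting here: FiTestUtil.nextRandomInt(min, max) draws from the half-open range [min, max), which is why both calls pass an upper bound of + 1, and the resulting file consists of (nPackets - 1) full packets plus one short tail packet. A quick worked instance with made-up numbers: for PACKET_SIZE = 1024, nPackets = 3, and lastPacketSize = 100, size = (3 - 1) * 1024 + 100 = 2148 bytes.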
Example 13: runTest17_19
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private void runTest17_19(String methodName, int dnIndex) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  initSlowDatanodeTest(t, new SleepAction(methodName, 0, 0, MAX_SLEEP));
  initSlowDatanodeTest(t, new SleepAction(methodName, 1, 0, MAX_SLEEP));
  initSlowDatanodeTest(t, new SleepAction(methodName, 2, 0, MAX_SLEEP));
  t.fiCallWritePacketToDisk.set(new CountdownDoosAction(methodName, dnIndex, 3));
  t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
  writeSeveralPackets(methodName);
  Assert.assertTrue(t.isSuccess());
}
Example 14: runTest29_30
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private void runTest29_30(String methodName, int dnIndex) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  initSlowDatanodeTest(t, new SleepAction(methodName, 0, 0, MAX_SLEEP));
  initSlowDatanodeTest(t, new SleepAction(methodName, 1, 0, MAX_SLEEP));
  initSlowDatanodeTest(t, new SleepAction(methodName, 2, 0, MAX_SLEEP));
  t.fiAfterDownstreamStatusRead.set(new CountdownOomAction(methodName, dnIndex, 3));
  t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
  writeSeveralPackets(methodName);
  Assert.assertTrue(t.isSuccess());
}
Example 15: runTest34_35
import org.apache.hadoop.fi.FiTestUtil; // import the required package/class
private void runTest34_35(String methodName, int dnIndex) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil.initTest();
  t.fiAfterDownstreamStatusRead.set(new CountdownSleepAction(methodName, dnIndex, 0, 3));
  t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
  writeSeveralPackets(methodName);
  Assert.assertTrue(t.isSuccess());
}
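The trailing 3 passed to the Countdown* actions in examples 13-15 is a countdown: the fault fires only once the hooked operation has been reached that many times, so the first packets flow through the pipeline normally before the failure is injected. A minimal sketch of the underlying FiTestUtil.CountdownConstraint (a sketch assuming the semantics in the Hadoop FI test sources, where isSatisfied() counts down and returns true from the given call onward):

import org.apache.hadoop.fi.FiTestUtil;

public class CountdownDemo {
  public static void main(String[] args) {
    // Satisfied from the third call on: the first two calls only count down.
    final FiTestUtil.CountdownConstraint third = new FiTestUtil.CountdownConstraint(3);
    for (int call = 1; call <= 4; call++) {
      System.out.println("call " + call + ": " + third.isSatisfied());
    }
    // Expected output: false, false, true, true
  }
}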