本文整理汇总了Java中org.apache.hadoop.fi.FiTestUtil.getMethodName方法的典型用法代码示例。如果您正苦于以下问题:Java FiTestUtil.getMethodName方法的具体用法?Java FiTestUtil.getMethodName怎么用?Java FiTestUtil.getMethodName使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fi.FiTestUtil
的用法示例。
在下文中一共展示了FiTestUtil.getMethodName方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: hFlushFi01_c
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_b()} but writing happens
 * across block and checksum's boundaries.
 * Uses a checksum chunk size (400) that does not evenly divide the usual
 * 512-byte write unit, so writes straddle checksum boundaries as well as
 * the (3-chunk) block boundary. Fault is injected at the pipeline's 0th
 * datanode.
 *
 * @throws IOException on test failure (narrowed from Exception for
 *         consistency with the sibling hFlush tests)
 */
@Test
public void hFlushFi01_c() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  Configuration conf = new HdfsConfiguration();
  // 400 deliberately misaligns checksum chunks with typical write sizes.
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
  runDiskErrorTest(conf, methodName,
      customBlockSize, new DerrAction(methodName, 0), 0, true);
}
示例2: hFlushFi02_c
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_c()} but for a pipeline's 1st datanode.
 * Writes cross both checksum-chunk and block boundaries while the fault
 * is injected at datanode index 1.
 */
@Test
public void hFlushFi02_c() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int bytesPerChecksum = 400;
  final int blockSize = 3 * bytesPerChecksum;
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
  runDiskErrorTest(conf, methodName, blockSize,
      new DerrAction(methodName, 1), 1, true);
}
示例3: hFlushFi02_a
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_a()} but for a pipeline's 1st datanode.
 * Runs with a default configuration and the standard test block size;
 * the disk error is injected at datanode index 1.
 */
@Test
public void hFlushFi02_a() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final Configuration defaultConf = new HdfsConfiguration();
  runDiskErrorTest(defaultConf, methodName, AppendTestUtil.BLOCK_SIZE,
      new DerrAction(methodName, 1), 1, false);
}
示例4: hFlushFi02_b
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_b()} but for a pipeline's 1st datanode.
 * Uses a small custom block (3 x 512-byte checksum chunks) so several
 * writes cross a block boundary; fault injected at datanode index 1.
 */
@Test
public void hFlushFi02_b() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int bytesPerChecksum = 512;
  final int blockSize = 3 * bytesPerChecksum;
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
  runDiskErrorTest(conf, methodName, blockSize,
      new DerrAction(methodName, 1), 1, true);
}
示例5: hFlushFi03_c
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_c()} but for a pipeline's 2nd datanode.
 * Writes straddle checksum-chunk (400 bytes) and block boundaries; the
 * fault is injected at datanode index 2.
 */
@Test
public void hFlushFi03_c() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int chunkSize = 400;
  final int blockSize = chunkSize * 3;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, chunkSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  final DerrAction faultAction = new DerrAction(methodName, 2);
  runDiskErrorTest(conf, methodName, blockSize, faultAction, 2, true);
}
示例6: hFlushFi03_b
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_b()} but for a pipeline's 2nd datanode.
 * Uses a 3-chunk custom block of 512-byte checksum chunks; the fault is
 * injected at datanode index 2.
 */
@Test
public void hFlushFi03_b() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int chunkSize = 512;
  final int blockSize = chunkSize * 3;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, chunkSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  final DerrAction faultAction = new DerrAction(methodName, 2);
  runDiskErrorTest(conf, methodName, blockSize, faultAction, 2, true);
}
示例7: hFlushFi03_a
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_a()} but for a pipeline's 2nd datanode.
 * Default configuration and standard test block size; the disk error is
 * injected at datanode index 2.
 */
@Test
public void hFlushFi03_a() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final Configuration defaultConf = new HdfsConfiguration();
  runDiskErrorTest(defaultConf, methodName, AppendTestUtil.BLOCK_SIZE,
      new DerrAction(methodName, 2), 2, false);
}
示例8: pipeline_Fi_20
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Streaming: Client writes several packets with DN0 very slow. Client
 * finishes write successfully.
 */
@Test
public void pipeline_Fi_20() throws IOException {
  final String name = FiTestUtil.getMethodName();
  FiTestUtil.LOG.info("Running " + name + " ...");
  final DataTransferTest test =
      (DataTransferTest) DataTransferTestUtil.initTest();
  // Slow down datanode 0 for the whole pipeline lifetime.
  initSlowDatanodeTest(test, new SleepAction(name, 0, MAX_SLEEP));
  writeSeveralPackets(name);
}
示例9: pipeline_Fi_22
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Streaming: Client writes several packets with DN2 very slow. Client
 * finishes write successfully.
 */
@Test
public void pipeline_Fi_22() throws IOException {
  final String name = FiTestUtil.getMethodName();
  FiTestUtil.LOG.info("Running " + name + " ...");
  final DataTransferTest test = (DataTransferTest)
      DataTransferTestUtil.initTest();
  // Slow down the last (2nd) datanode in the pipeline.
  initSlowDatanodeTest(test, new SleepAction(name, 2, MAX_SLEEP));
  writeSeveralPackets(name);
}
示例10: hFlushFi03_b
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Similar to {@link #hFlushFi01_b()} but for a pipeline's 2nd datanode.
 * Custom block of three 512-byte checksum chunks; the disk error is
 * injected at datanode index 2.
 */
@Test
public void hFlushFi03_b() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int perChecksum = 512;
  final int blockSize = perChecksum * 3;
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, perChecksum);
  runDiskErrorTest(conf, methodName, blockSize,
      new DerrAction(methodName, 2), 2, true);
}
示例11: hFlushFi01_b
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * The test calls
 * {@link #runDiskErrorTest(Configuration, String, int, DerrAction, int, boolean)}
 * to make a number of writes across block boundaries.
 * hflush() is called after each write() during a pipeline life time.
 * Thus, the injected fault ought to be triggered for the 0th datanode.
 */
@Test
public void hFlushFi01_b() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final int perChecksum = 512;
  final int blockSize = perChecksum * 3;
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, perChecksum);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  runDiskErrorTest(conf, methodName, blockSize,
      new DerrAction(methodName, 0), 0, true);
}
示例12: pipeline_Fi_22
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Streaming: Client writes several packets with DN2 very slow. Client
 * finishes write successfully.
 */
@Test
public void pipeline_Fi_22() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest transferTest =
      (DataTransferTest) DataTransferTestUtil.initTest();
  final SleepAction slowDn2 = new SleepAction(methodName, 2, MAX_SLEEP);
  initSlowDatanodeTest(transferTest, slowDn2);
  writeSeveralPackets(methodName);
}
示例13: pipeline_Fi_01
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Pipeline setup:
 * DN0 never responds after receiving the setup request from the client.
 * Client gets an IOException and determines DN0 bad.
 */
@Test
public void pipeline_Fi_01() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  // Sleep "forever" (duration 0 = indefinite) at datanode 0 during setup.
  final SleepAction hangDn0 = new SleepAction(methodName, 0, 0);
  runReceiverOpWriteBlockTest(methodName, 0, hangDn0);
}
示例14: pipeline_Fi_02
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Pipeline setup:
 * DN1 never responds after receiving the setup request from the client.
 * Client gets an IOException and determines DN1 bad.
 */
@Test
public void pipeline_Fi_02() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final SleepAction hangDn1 = new SleepAction(methodName, 1, 0);
  runReceiverOpWriteBlockTest(methodName, 1, hangDn1);
}
示例15: pipeline_Fi_05
import org.apache.hadoop.fi.FiTestUtil; //导入方法依赖的package包/类
/**
 * Pipeline setup, DN0 never responds after receiving the setup ack from DN1.
 * Client gets an IOException and determines DN0 bad.
 */
@Test
public void pipeline_Fi_05() throws IOException {
  final String methodName = FiTestUtil.getMethodName();
  final SleepAction hangDn0 = new SleepAction(methodName, 0, 0);
  runStatusReadTest(methodName, 0, hangDn0);
}