本文整理汇总了Java中org.apache.hadoop.mapred.UtilsForTests.waitFor方法的典型用法代码示例。如果您正苦于以下问题:Java UtilsForTests.waitFor方法的具体用法?Java UtilsForTests.waitFor怎么用?Java UtilsForTests.waitFor使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapred.UtilsForTests
的用法示例。
在下文中一共展示了UtilsForTests.waitFor方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: verifyTTNotBlackListed
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Verifies that the given task tracker is not blacklisted.
 * @param client tasktracker info
 * @param conf modified configuration object
 * @param cluster mrcluster instance
 * @throws IOException thrown if verification fails
 */
public void verifyTTNotBlackListed(TTClient client, Configuration conf,
    MRCluster cluster) throws IOException {
  // Validate the configuration up front so a misconfigured cluster
  // fails fast instead of after the health-check wait below.
  String defaultHealthScript = conf.get("mapred.healthChecker.script.path");
  Assert.assertNotNull("Health script was not set", defaultHealthScript);
  int interval = conf.getInt("mapred.healthChecker.interval", 0);
  Assert.assertTrue("Interval cannot be zero.", interval != 0);
  // Wait one full health-check interval (plus slack) so the tracker's
  // health status has a chance to refresh.
  UtilsForTests.waitFor(interval + 2000);
  Assert.assertTrue("Task tracker is not healthy",
      nodeHealthStatus(client, true));
  TaskTrackerStatus status = client.getStatus();
  JTClient jclient = cluster.getJTClient();
  Assert.assertFalse("Failed to move task tracker to healthy list",
      jclient.getProxy().isBlackListed(status.getTrackerName()));
}
示例2: verifyTTBlackList
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Verifies that the given task tracker is blacklisted.
 * @param conf modified Configuration object
 * @param client tasktracker info
 * @param errorMessage expected health report that must be asserted
 * @param cluster mr cluster instance
 * @throws IOException is thrown when verification fails
 */
public void verifyTTBlackList(Configuration conf, TTClient client,
    String errorMessage, MRCluster cluster) throws IOException {
  int interval = conf.getInt("mapred.healthChecker.interval", 0);
  Assert.assertTrue("Interval cannot be zero.", interval != 0);
  // Wait one full health-check interval (plus slack) so the tracker's
  // health status has a chance to refresh.
  UtilsForTests.waitFor(interval + 2000);
  Assert.assertTrue("Task tracker was never blacklisted ",
      nodeHealthStatus(client, false));
  TaskTrackerStatus status = client.getStatus();
  // assertEquals gives an expected-vs-actual diff on failure, unlike
  // assertTrue(equals(...)).
  Assert.assertEquals("The custom error message did not appear",
      errorMessage, status.getHealthStatus().getHealthReport().trim());
  JTClient jClient = cluster.getJTClient();
  Assert.assertTrue("Failed to move task tracker to blacklisted list",
      jClient.getProxy().isBlackListed(status.getTrackerName()));
}
示例3: nodeHealthStatus
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Polls the task tracker until its health state matches the expected
 * value, for at most 60 polls spaced 3 seconds apart (~3 minutes).
 * @param client the task tracker client
 * @param hStatus expected state: true for healthy, false for unhealthy
 * @return true if the tracker reached the expected state before the
 *         polling limit, false otherwise
 * @throws IOException failed to get the status of task tracker
 */
public boolean nodeHealthStatus(TTClient client, boolean hStatus) throws IOException {
  int counter = 0;
  TaskTrackerStatus status = client.getStatus();
  while (counter < 60) {
    LOG.info("isNodeHealthy " + status.getHealthStatus().isNodeHealthy());
    if (status.getHealthStatus().isNodeHealthy() == hStatus) {
      break;
    }
    UtilsForTests.waitFor(3000);
    status = client.getStatus();
    Assert.assertNotNull("Task tracker status is null", status);
    counter++;
  }
  // counter == 60 means every poll saw the wrong state (timeout).
  return counter != 60;
}
示例4: waitTillRunState
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Blocks until the given job reaches RUNNING state, polling every 10
 * seconds; fails the test if the job has not started after ~100 seconds.
 */
private void waitTillRunState(JobInfo jInfo, JobID jobID,
    JTProtocol remoteJTClient) throws Exception {
  int attempts = 0;
  while (jInfo != null
      && jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
    UtilsForTests.waitFor(10000);
    attempts++;
    jInfo = remoteJTClient.getJobInfo(jobID);
    // Bound the wait so a stuck job cannot hang the test forever.
    if (attempts > 10) {
      Assert.fail("job has not reached running state for more than" +
          "100 seconds. Failing at this point");
    }
  }
}
示例5: isJobStopped
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Checks whether the given job has stopped, polling for up to a minute.
 * @param id id of the running job.
 * @return true if the job has stopped (or its job info is no longer
 *         available), false if still incomplete after the timeout.
 * @throws IOException is thrown if the job info cannot be fetched.
 */
public boolean isJobStopped(JobID id) throws IOException {
  int counter = 0;
  JobInfo jInfo = getProxy().getJobInfo(id);
  // A null jInfo means the job tracker no longer knows the job; the
  // loop is skipped and the method reports the job as stopped.
  if (jInfo != null) {
    while (counter < 60) {
      if (jInfo.getStatus().isJobComplete()) {
        break;
      }
      UtilsForTests.waitFor(1000);
      jInfo = getProxy().getJobInfo(id);
      counter++;
    }
  }
  return counter != 60;
}
示例6: isJobStarted
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Checks whether the job has started, polling for up to a minute.
 * @param id job id
 * @return true if the job reached RUNNING before the timeout.
 * @throws IOException if an I/O error occurs.
 */
public boolean isJobStarted(JobID id) throws IOException {
  // NOTE(review): assumes the first getJobInfo call returns non-null;
  // a null here would NPE on the first status check — confirm callers
  // only invoke this after a successful submit.
  JobInfo jInfo = getJobInfo(id);
  int counter = 0;
  while (counter < 60) {
    if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
      break;
    } else {
      UtilsForTests.waitFor(1000);
      jInfo = getJobInfo(jInfo.getID());
      Assert.assertNotNull("Job information is null", jInfo);
    }
    counter++;
  }
  return counter != 60;
}
示例7: isTaskStarted
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Checks whether the task has started, polling for up to a minute.
 * @param taskInfo task information
 * @return true if the task reached RUNNING before the timeout.
 * @throws IOException if an I/O error occurs.
 */
public boolean isTaskStarted(TaskInfo taskInfo) throws IOException {
  JTProtocol wovenClient = getProxy();
  int counter = 0;
  while (counter < 60) {
    // An empty status array means no attempt has been launched yet.
    if (taskInfo.getTaskStatus().length > 0) {
      if (taskInfo.getTaskStatus()[0].getRunState() ==
          TaskStatus.State.RUNNING) {
        break;
      }
    }
    UtilsForTests.waitFor(1000);
    taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    counter++;
  }
  return counter != 60;
}
示例8: isTaskStopped
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Checks whether the particular task managed by a task tracker has
 * stopped, polling for up to a minute.
 * @param tID id of the task to get the status of.
 * @throws IOException if there is an error.
 * @return true if the task is stopped (or tID is null / unknown to the
 *         proxy), false if still RUNNING/UNASSIGNED after the timeout.
 */
public boolean isTaskStopped(TaskID tID) throws IOException {
  int counter = 0;
  // A null id or unknown task skips the loop, so counter stays 0 and
  // the task is reported as stopped.
  if (tID != null && proxy.getTask(tID) != null) {
    TaskStatus.State tState = proxy.getTask(tID).getTaskStatus().getRunState();
    while (counter < 60) {
      // Any state other than RUNNING/UNASSIGNED counts as stopped.
      if (tState != TaskStatus.State.RUNNING &&
          tState != TaskStatus.State.UNASSIGNED) {
        break;
      }
      UtilsForTests.waitFor(1000);
      tState = proxy.getTask(tID).getTaskStatus().getRunState();
      counter++;
    }
  }
  return counter != 60;
}
示例9: getTTClientInstance
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Get a TTClient instance from a running task. <br/>
 * Polls up to 60 times (100 ms apart) waiting for the task to report
 * the tracker it runs on.
 * @param taskInfo information of the running task
 * @return TTClient instance, or null if no tracker was reported
 * @throws IOException
 */
public TTClient getTTClientInstance(TaskInfo taskInfo)
    throws IOException {
  JTProtocol remoteJTClient = getJTClient().getProxy();
  String[] trackers = taskInfo.getTaskTrackers();
  for (int attempt = 0; attempt < 60 && trackers.length == 0; attempt++) {
    UtilsForTests.waitFor(100);
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    trackers = taskInfo.getTaskTrackers();
  }
  TTClient ttClient = null;
  if (trackers.length != 0) {
    // Tracker names look like "tracker_host:port" — extract the host.
    String hostName = trackers[0].split("_")[1].split(":")[0];
    ttClient = getTTClient(hostName);
  }
  return ttClient;
}
示例10: testKilledJob
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Submits a kill-job, waits for setup to finish, kills it, and then
 * verifies which output files did (fileName) and did not (exclude)
 * survive the kill.
 */
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);
  job.submit();

  // Block until the setup phase has fully completed.
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }

  // Kill the job; waitForCompletion must then report failure.
  job.killJob();
  assertFalse("Job did not get kill", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job " + job.getJobID(),
        fs.exists(testFile));
  }
  // None of the excluded files may survive a killed job.
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job "
        + job.getJobID(), fs.exists(file));
  }
}
示例11: setupJob
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
@Override
public void setupJob(JobContext context) throws IOException {
  FileSystem fs = FileSystem.get(context.getConfiguration());
  // Spin until the shared directory shows up on the filesystem.
  while (!fs.exists(shareDir)) {
    UtilsForTests.waitFor(100);
  }
  super.cleanupJob(context);
}
示例12: waitTillReady
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Busy-waits (10 ms polls) until the scheduler has launched all of the
 * job's map tasks, and afterwards all of its reduce tasks.
 */
private void waitTillReady(JobInProgress jip, JobConf job) {
  // First: every map must be scheduled.
  while (jip.runningMaps() < job.getNumMapTasks()) {
    UtilsForTests.waitFor(10);
  }
  // Then: every reducer must be scheduled.
  while (jip.runningReduces() < job.getNumReduceTasks()) {
    UtilsForTests.waitFor(10);
  }
}
示例13: testKilledJob
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Submits a kill-job, waits for setup to finish, kills it, and then
 * verifies which output files did (fileName) and did not (exclude)
 * survive the kill.
 */
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);
  job.submit();

  // Block until the setup phase has fully completed.
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }

  // Kill the job; waitForCompletion must then report failure.
  job.killJob();
  assertFalse("Job did not get kill", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job ", fs.exists(testFile));
  }
  // None of the excluded files may survive a killed job.
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job ", fs
        .exists(file));
  }
}
示例14: testFailedTaskJobStatus
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Verifying the running job status whether it succeeds or not
 * after failing some of its tasks.
 */
@Test
public void testFailedTaskJobStatus() throws IOException,
    InterruptedException {
  conf = remoteJTClient.getDaemonConf();
  TaskInfo taskInfo = null;
  // Submit a sleep job with 1 map and 1 reduce (10s / 4s sleeps).
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 1, 10000, 4000, 100, 100);
  RunningJob runJob = jobClient.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min.",
      jtClient.isJobStarted(jobId));
  // Pick the first real map task, skipping setup/cleanup tasks.
  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(jobId);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
      taskInfo = taskinfo;
      break;
    }
  }
  Assert.assertTrue("Task has not been started for 1 min.",
      jtClient.isTaskStarted(taskInfo));
  // Fail the running task.
  // NOTE(review): killTask(attempt, true) presumably marks the attempt
  // as failed rather than killed — confirm against the RunningJob API.
  NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
  networkJob.killTask(taskAttID, true);
  LOG.info("Waiting till the job is completed...");
  // Poll until the job finishes, refreshing job info each iteration.
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(jobId);
  }
  // Despite the failed attempt, the job overall must still succeed.
  Assert.assertEquals("JobStatus", JobStatus.SUCCEEDED,
      jInfo.getStatus().getRunState());
}
示例15: waitForTTStop
import org.apache.hadoop.mapred.UtilsForTests; //导入方法依赖的package包/类
/**
 * Waits till this Tasktracker daemon process is stopped <br/>
 *
 * @return void
 * @throws IOException
 */
public void waitForTTStop() throws IOException {
  LOG.info("Waiting for Tasktracker:" + getHostName()
      + " to stop.....");
  // Keep pinging every 10 seconds; once ping throws, the daemon is down.
  for (;;) {
    try {
      ping();
      LOG.debug(getHostName() + " is waiting state to stop.");
      UtilsForTests.waitFor(10000);
    } catch (Exception exp) {
      LOG.info("TaskTracker : " + getHostName() + " is stopped...");
      break;
    }
  }
}