本文整理汇总了Java中org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex类的典型用法代码示例。如果您正苦于以下问题:Java JobSplit.TaskSplitIndex类的具体用法?Java JobSplit.TaskSplitIndex怎么用?Java JobSplit.TaskSplitIndex使用的例子?那么, 这里精选的代码示例或许可以为您提供帮助。您也可以进一步了解该嵌套类所在的外部类org.apache.hadoop.mapreduce.split.JobSplit
的用法示例。
在下文中一共展示了JobSplit.TaskSplitIndex类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.apache.hadoop.mapreduce.split.JobSplit; //导入方法依赖的package包/类
/**
 * Debugging utility: reads a serialized {@link JobSplit.TaskSplitIndex} from
 * the file named by {@code args[0]}, resolves the InputSplit it points to via
 * {@code getSplitDetails}, and prints the split's class, toString and
 * reflected fields to stdout.
 *
 * @param args args[0] is the path to a task split-index file
 * @throws IOException if the index file or the split cannot be read
 */
public static void main(String... args) throws IOException {
  if (args.length < 1) {
    // Guard against ArrayIndexOutOfBoundsException on missing argument.
    System.err.println("Usage: <task-split-index-file>");
    return;
  }
  String taskSplitFile = args[0];
  Configuration conf = new Configuration();
  JobSplit.TaskSplitIndex taskSplitIndex = new JobSplit.TaskSplitIndex();
  // try-with-resources guarantees the stream is closed even when readFields
  // throws; the original leaked the FileInputStream on failure.
  try (DataInputStream is =
      new DataInputStream(new FileInputStream(taskSplitFile))) {
    taskSplitIndex.readFields(is);
  }
  Object split = getSplitDetails(conf,
      new Path(taskSplitIndex.getSplitLocation()),
      taskSplitIndex.getStartOffset());
  System.out.println("InputSplit instance class = " + split.getClass().getName());
  System.out.println("ToString on split = " + split);
  System.out.println("Reflection fields = " + ToStringBuilder
      .reflectionToString(split, ToStringStyle.SHORT_PREFIX_STYLE));
}
示例2: obtainNewMapTask
import org.apache.hadoop.mapreduce.split.JobSplit; //导入方法依赖的package包/类
@Override
public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int ignored) throws IOException {
  // Hand out a fake map task; toString() names the tracker it was assigned
  // to so scheduler-test output is readable.
  final TaskAttemptID mapAttempt = getTaskAttemptID(true);
  final JobSplit.TaskSplitIndex emptySplit = new JobSplit.TaskSplitIndex();
  final Task mapTask = new MapTask("", mapAttempt, 0, emptySplit, 1) {
    @Override
    public String toString() {
      return String.format("%s on %s", getTaskID(), tts.getTrackerName());
    }
  };
  // Register the assignment with the fake tracker manager and count it.
  taskTrackerManager.update(tts.getTrackerName(), mapTask);
  runningMapTasks++;
  return mapTask;
}
示例3: obtainNewMapTask
import org.apache.hadoop.mapreduce.split.JobSplit; //导入方法依赖的package包/类
@Override
public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int ignored) throws IOException {
  // Produce a stub map task whose toString() reports which tracker got it.
  final TaskAttemptID mapAttempt = getTaskAttemptID(TaskType.MAP);
  final JobSplit.TaskSplitIndex emptySplit = new JobSplit.TaskSplitIndex();
  final Task mapTask = new MapTask("", mapAttempt, 0, emptySplit, 1) {
    @Override
    public String toString() {
      return String.format("%s on %s", getTaskID(), tts.getTrackerName());
    }
  };
  // Record the assignment and bump the running-map counter.
  taskTrackerManager.update(tts.getTrackerName(), mapTask);
  runningMapTasks++;
  return mapTask;
}
示例4: testLogTruncation
import org.apache.hadoop.mapreduce.split.JobSplit; //导入方法依赖的package包/类
/**
 * Test the truncation of log-file.
 *
 * It writes two log files and truncates one, does not truncate other:
 * SYSLOG is written past the 1000-byte retain size and must come back
 * truncated (with the truncation marker at its head), while STDERR stays
 * under the limit and must be left untouched. Running the truncater a
 * second time must not change either file (truncation is idempotent).
 *
 * @throws IOException on any log read/write failure
 */
@Test
public void testLogTruncation() throws IOException {
// Retain at most 1000 bytes per log file.
Configuration conf = setRetainSizes(1000L, 1000L);
TaskLogsTruncater trunc = new TaskLogsTruncater(conf);
TaskID baseId = new TaskID();
int taskcount = 0;
TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++);
// Empty split index and zero slots: only the attempt identity matters here.
Task task = new MapTask(null, attemptID, 0, new JobSplit.TaskSplitIndex(),
0);
// Let the tasks write logs more than retain-size
writeRandomBytes(attemptID, attemptID, LogName.SYSLOG, 1500);
writeRandomBytes(attemptID, attemptID, LogName.STDERR, 500);
File attemptDir = TaskLog.getAttemptDir(attemptID, false);
assertTrue(attemptDir + " doesn't exist!", attemptDir.exists());
// Finish the task and the JVM too.
JVMInfo jvmInfo = new JVMInfo(attemptDir, Arrays.asList(task));
trunc.truncateLogs(jvmInfo);
// The log-file should now be truncated: retained bytes plus the
// truncation message prepended by the truncater.
assertTrue(attemptDir.exists());
Map<LogName, Long> logLengths = getAllLogsFileLengths(attemptID, false);
File logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
assertEquals(1000 + truncatedMsgSize, logFile.length());
// The index file should also be proper.
assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
.longValue());
String syslog = TestMiniMRMapRedDebugScript.readTaskLog(LogName.SYSLOG,
attemptID, false);
assertTrue(syslog.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
// STDERR was under the retain size, so it must be untouched.
logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
assertEquals(500, logFile.length());
// The index file should also be proper.
assertEquals(500, logLengths.get(LogName.STDERR).longValue());
String stderr = TestMiniMRMapRedDebugScript.readTaskLog(LogName.STDERR,
attemptID, false);
assertFalse(stderr.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
// truncate once again -- lengths must stay the same (no double truncation).
trunc.truncateLogs(jvmInfo);
logLengths = getAllLogsFileLengths(attemptID, false);
logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
assertEquals(1000 + truncatedMsgSize, logFile.length());
// The index file should also be proper.
assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
.longValue());
logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
assertEquals(500, logFile.length());
// The index file should also be proper.
assertEquals(500, logLengths.get(LogName.STDERR).longValue());
}
示例5: obtainNewMapTask
import org.apache.hadoop.mapreduce.split.JobSplit; //导入方法依赖的package包/类
@Override
public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize,
    int ignored) throws IOException {
  // Once every "real" map has been handed out, any further task would be
  // speculative.
  final boolean allMapsScheduled = (mapTaskCtr == numMapTasks);
  // Refuse a speculative attempt when speculation is off or one was already
  // scheduled.
  if (allMapsScheduled
      && (!getJobConf().getMapSpeculativeExecution()
          || speculativeMapTasks > 0)) {
    return null;
  }
  final TaskAttemptID attemptId = getTaskAttemptID(true, allMapsScheduled);
  final Task task = new MapTask("", attemptId, 0, new JobSplit.TaskSplitIndex(),
      super.numSlotsPerMap) {
    @Override
    public String toString() {
      return String.format("%s on %s", getTaskID(), tts.getTrackerName());
    }
  };
  taskTrackerManager.startTask(tts.getTrackerName(), task);
  runningMapTasks++;
  // Track the new attempt with a fake TIP marked RUNNING.
  final FakeTaskInProgress mapTip = new FakeTaskInProgress(getJobID(),
      getJobConf(), task, true, this);
  mapTip.taskStatus.setRunState(TaskStatus.State.RUNNING);
  if (allMapsScheduled) {
    // A speculative map was just scheduled: flag all existing fake map TIPs
    // so that no further speculative attempts are produced.
    speculativeMapTasks++;
    for (TaskInProgress tip : mapTips) {
      if (tip instanceof FakeTaskInProgress) {
        ((FakeTaskInProgress) tip).hasSpeculativeMap = false;
      }
    }
  } else {
    // Non-speculative attempt: remember its TIP and refresh the
    // JobInProgress map-TIP array.
    mapTips.add(mapTip);
    maps = mapTips.toArray(new TaskInProgress[mapTips.size()]);
  }
  return task;
}