This page collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskLog. If you have been wondering what TaskLog is for, how to use it, or where to find working examples, the curated snippets below should help.
The TaskLog class belongs to the org.apache.hadoop.mapred package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
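Before diving into the examples, here is a minimal, self-contained sketch of the TaskLog entry point used most often below, TaskLog.getTaskLogFile (its signature appears verbatim in Examples 7 and 8). The attempt-ID string is illustrative, not taken from a real job:

import java.io.File;

import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskLog;

public class TaskLogQuickStart {
  public static void main(String[] args) {
    // Parse a task attempt ID from its canonical string form
    // (this particular ID is made up for illustration).
    TaskAttemptID taskId =
        TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
    // Resolve the on-disk stdout log for the attempt; the boolean flag
    // says whether this is a cleanup attempt.
    File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
    System.out.println("stdout log: " + stdOut.getAbsolutePath());
  }
}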
Example 1: MRAppMaster
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
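A note on the design choice: createLogSyncer() returns a scheduled executor that periodically flushes log appenders, so log output is not lost if the JVM dies between writes. A minimal sketch of the matching teardown, assuming the logSyncer field above and TaskLog's syncLogsShutdown helper:

@Override
protected void serviceStop() throws Exception {
  super.serviceStop();
  // Drain the periodic syncer and flush any buffered log output
  // before the ApplicationMaster JVM exits.
  TaskLog.syncLogsShutdown(logSyncer);
}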
Example 2: copyOriginalIndexFileInfo
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Copy the original index-file info, unchanged, for every attempt in the JVM.
 *
 * @param lInfo the JVM info holding all task attempts to process
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map that receives the copied entries
 * @param logName the log type whose details are copied
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Example 3: cleanupAllVolumes
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Move all files/directories inside each volume into TOBEDELETED, and then
 * delete them. The TOBEDELETED directory itself is left alone.
 */
public void cleanupAllVolumes() throws IOException {
  for (int v = 0; v < volumes.length; v++) {
    // List all files inside the volume
    FileStatus[] files = null;
    try {
      files = localFileSystem.listStatus(new Path(volumes[v]),
          TaskLog.USERLOGS_PATH_FILTER);
    } catch (Exception e) {
      // Ignore exceptions from listStatus; we tolerate missing volumes.
    }
    if (files != null) {
      for (int f = 0; f < files.length; f++) {
        // Get the file name - the last component of the Path
        String entryName = files[f].getPath().getName();
        // Do not delete the current TOBEDELETED
        if (!TOBEDELETED.equals(entryName)) {
          moveAndDeleteRelativePath(volumes[v], entryName);
        }
      }
    }
  }
}
Example 4: revertIndexFileInfo
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Revert the index-file info to the original details for every attempt.
 *
 * @param lInfo the per-JVM info holding all task attempts to process
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map that receives the reverted entries
 * @param logName the log type whose details are reverted
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.allAttempts) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Example 5: MRAppMaster
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime, int maxAppAttempts) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  this.maxAppAttempts = maxAppAttempts;
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Example 6: getVMCommand
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Create the worker-attempt JVM command.
 *
 * @param conf application configuration
 * @param appid application id
 * @param workerAttemptId worker attempt id
 * @return the JVM launch command, merged into a single shell command string
 */
public static List<String> getVMCommand(Configuration conf, ApplicationId appid,
    WorkerAttemptId workerAttemptId) {
  Vector<String> vargs = new Vector<String>(8);
  vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
  String javaOpts = getChildJavaOpts(conf, appid, workerAttemptId);
  LOG.debug("javaOpts=" + javaOpts);
  String[] javaOptsSplit = javaOpts.split(" ");
  for (int i = 0; i < javaOptsSplit.length; i++) {
    vargs.add(javaOptsSplit[i]);
  }
  Path childTmpDir = new Path(Environment.PWD.$(),
      YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
  vargs.add("-Djava.io.tmpdir=" + childTmpDir);
  // Set up the log4j properties
  long logSize = 0;
  setupLog4jProperties(conf, vargs, logSize);
  // Add the main class and its arguments
  String workerClassName = conf.get(AngelConf.ANGEL_WORKER_CLASS,
      AngelConf.DEFAULT_ANGEL_WORKER_CLASS);
  vargs.add(workerClassName);
  // Redirect stdout/stderr to the task log files
  vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
  vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
  // Final command
  StringBuilder mergedCommand = new StringBuilder();
  for (CharSequence str : vargs) {
    mergedCommand.append(str).append(" ");
  }
  Vector<String> vargsFinal = new Vector<String>(1);
  vargsFinal.add(mergedCommand.toString());
  return vargsFinal;
}
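One plausible way to consume the returned command list is to hand it to a YARN ContainerLaunchContext. The sketch below is an assumption about the surrounding launch code, not part of the original source; it presumes it lives in the same class as Example 6, so getVMCommand and Angel's WorkerAttemptId resolve without extra imports. ContainerLaunchContext and Records are standard YARN API types (org.apache.hadoop.yarn.api.records and org.apache.hadoop.yarn.util):

private static ContainerLaunchContext buildLaunchContext(Configuration conf,
    ApplicationId appid, WorkerAttemptId workerAttemptId) {
  // getVMCommand is the method shown in Example 6.
  List<String> commands = getVMCommand(conf, appid, workerAttemptId);
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  // The NodeManager executes these commands to start the worker JVM.
  ctx.setCommands(commands);
  return ctx;
}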
Example 7: initStdOut
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Clean up stdout and stderr logs left over from previous runs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
      .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare the log folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else {
    // schedule the stale logs for deletion when the JVM exits
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
Example 8: readStdOut
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
      .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  return readFile(stdOut);
}
Example 9: readTaskLog
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Reads a task log and returns it as a trimmed string.
 *
 * @param filter
 *          Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
 * @param taskId
 *          The task id for which the log has to be collected
 * @param isCleanup
 *          whether the task is a cleanup attempt or not.
 * @return task log as string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  // string buffer to store the task log
  StringBuffer result = new StringBuffer();
  int res;
  // open a reader over the whole task log
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  // build the log string from the input stream
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      // append only the bytes actually read, not the whole buffer
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();
  // trim the string and return it
  return result.toString().trim();
}
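A typical call to the helper above (the attempt-ID string is illustrative):

// Read and trim the syslog of a regular (non-cleanup) attempt.
String syslog = readTaskLog(TaskLog.LogName.SYSLOG,
    org.apache.hadoop.mapred.TaskAttemptID.forName(
        "attempt_200707121733_0003_m_000005_0"),
    false);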
Example 10: validateTaskStderr
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);
  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);
  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertEquals(expectedStderr.trim(), log);
}
Example 11: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Get the log-file details for each of the attempts passed.
 *
 * @param allAttempts the attempts we are interested in
 * @return a map of task to the log-file detail
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(),
            task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}
Example 12: updateIndicesAfterLogTruncation
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Truncation of logs is done. Now sync the index files to reflect the
 * truncated sizes.
 *
 * @param location the attempt-log directory whose index files are updated
 * @param updatedTaskLogFileDetails the truncated per-task log-file details
 */
private void updateIndicesAfterLogTruncation(String location,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry :
      updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set the previous length
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set the current length
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(location, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.", ioe);
    }
  }
}
Example 13: main
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
public static void main(String[] args) throws IOException {
  isTruncaterJvm = true;
  String taskRanFile = args[0];
  Configuration conf = new Configuration();
  // read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));
  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    // the boolean flag distinguishes map tasks from reduce tasks
    Task t = din.readBoolean() ? new MapTask() : new ReduceTask();
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  din.close();
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);
  trunc.truncateLogs(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(),
          firstTask.isTaskCleanupTask()),
      taskAttemptsRan));
  System.exit(0);
}
Example 14: initializeJobLogDir
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Create the job log directory and set appropriate permissions on it.
 *
 * @param jobId the job whose user-log directory is created
 * @throws IOException if the directory cannot be created
 */
public void initializeJobLogDir(JobID jobId) throws IOException {
  Path jobUserLogDir = new Path(TaskLog.getJobDir(jobId).getCanonicalPath());
  if (!fs.mkdirs(jobUserLogDir)) {
    throw new IOException("Could not create job user log directory: " +
        jobUserLogDir);
  }
  fs.setPermission(jobUserLogDir, new FsPermission((short) 0700));
}
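The 0700 permission is the point of this helper: the job's user-log directory becomes readable and writable only by the owning user, so one job's task logs are not exposed to other local users. A hypothetical call site (the job-ID string is illustrative):

// JobID.forName parses the canonical string form of a job ID.
initializeJobLogDir(JobID.forName("job_200707121733_0003"));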
Example 15: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog; // import the required package/class
/**
 * Get the log-file details for each of the attempts passed.
 *
 * @param allAttempts the attempts we are interested in
 * @return a map of task to the log-file detail
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(),
            task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}