This article collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskLog.LogName. If you are wondering what the LogName class is for and how to use it, the curated examples below should help.
The LogName class belongs to the org.apache.hadoop.mapred.TaskLog package. Thirteen code examples are shown below, ordered by popularity by default.
Example 1: testTaskLogWithoutTaskLogDir
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* test without TASK_LOG_DIR
*
* @throws IOException
*/
@Test (timeout=50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
  // Make sure the container log dir property is unset for this test.
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  // Without the property set, no MRv2 log dir should be resolved.
  assertNull(TaskLog.getMRv2LogDir());
TaskAttemptID taid = mock(TaskAttemptID.class);
JobID jid = new JobID("job", 1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
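For contrast, here is a minimal companion sketch of the positive case. It assumes, as the assertions above imply, that getMRv2LogDir() simply mirrors the YARN_APP_CONTAINER_LOG_DIR system property; the value "testString" is purely illustrative.

@Test (timeout=50000)
public void testTaskLogWithTaskLogDir() throws IOException {
  // Hypothetical positive case: set the container log dir property first.
  System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
  assertEquals("testString", TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  // The stdout log file should still resolve to a path ending in "stdout".
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}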
Example 2: copyOriginalIndexFileInfo
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
 * Copy the original index-file info for a log stream, so the index still
 * points at the untouched data.
 *
 * @param lInfo the JVM whose attempts are being processed
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map that receives the copied entries
 * @param logName the log stream being handled
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
LogName logName) {
if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
for (Task task : lInfo.getAllAttempts()) {
if (!updatedTaskLogFileDetails.containsKey(task)) {
updatedTaskLogFileDetails.put(task,
new HashMap<LogName, LogFileDetail>());
}
updatedTaskLogFileDetails.get(task).put(logName,
taskLogFileDetails.get(task).get(logName));
}
}
}
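A hedged sketch of a plausible call site: in the truncater's main loop, this helper would run for each stream that does not need truncation, so its index info is carried over unchanged. The loop below is illustrative, not the actual implementation.

for (LogName logName : LogName.values()) {
  if (!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
    // Carry the original offsets over untouched.
    copyOriginalIndexFileInfo(lInfo, taskLogFileDetails,
        updatedTaskLogFileDetails, logName);
    continue;
  }
  // ... otherwise truncate the file and record the new offsets ...
}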
Example 3: isTruncationNeeded
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Check if truncation of logs is needed for the given jvmInfo. If all the
* tasks that ran in a JVM are within the log-limits, then truncation is not
* needed. Otherwise it is needed.
*
 * @param lInfo the JVM whose attempt logs are being checked
 * @param taskLogFileDetails per-task log-file details, keyed by task
 * @param logName the log stream to check
* @return true if truncation is needed, false otherwise
*/
private boolean isTruncationNeeded(JVMInfo lInfo,
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
LogName logName) {
boolean truncationNeeded = false;
LogFileDetail logFileDetail = null;
for (Task task : lInfo.getAllAttempts()) {
long taskRetainSize =
(task.isMapTask() ? mapRetainSize : reduceRetainSize);
Map<LogName, LogFileDetail> allLogsFileDetails =
taskLogFileDetails.get(task);
logFileDetail = allLogsFileDetails.get(logName);
if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
&& logFileDetail.length > taskRetainSize) {
truncationNeeded = true;
break;
}
}
return truncationNeeded;
}
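The mapRetainSize and reduceRetainSize fields are read from configuration elsewhere in the class. Below is a hedged sketch of how they might be initialized; the configuration keys are assumptions, not confirmed by the examples in this article.

// Assumed configuration keys; -1 conventionally disables truncation.
long mapRetainSize = conf.getLong(
    "mapreduce.cluster.map.userlog.retain-size", -1L);
long reduceRetainSize = conf.getLong(
    "mapreduce.cluster.reduce.userlog.retain-size", -1L);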
Example 4: revertIndexFileInfo
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
 * Revert the index-file info for a log stream back to its original state.
 *
 * @param lInfo the JVM whose attempts are being processed
 * @param taskLogFileDetails the original per-task log-file details
 * @param updatedTaskLogFileDetails the map that receives the restored entries
 * @param logName the log stream being handled
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
LogName logName) {
if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
for (Task task : lInfo.allAttempts) {
if (!updatedTaskLogFileDetails.containsKey(task)) {
updatedTaskLogFileDetails.put(task,
new HashMap<LogName, LogFileDetail>());
}
updatedTaskLogFileDetails.get(task).put(logName,
taskLogFileDetails.get(task).get(logName));
}
}
}
Example 5: isTruncationNeeded
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Check if truncation of logs is needed for the given jvmInfo. If all the
* tasks that ran in a JVM are within the log-limits, then truncation is not
* needed. Otherwise it is needed.
*
 * @param lInfo the JVM whose attempt logs are being checked
 * @param taskLogFileDetails per-task log-file details, keyed by task
 * @param logName the log stream to check
* @return true if truncation is needed, false otherwise
*/
private boolean isTruncationNeeded(PerJVMInfo lInfo,
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
LogName logName) {
boolean truncationNeeded = false;
LogFileDetail logFileDetail = null;
for (Task task : lInfo.allAttempts) {
long taskRetainSize =
(task.isMapTask() ? mapRetainSize : reduceRetainSize);
Map<LogName, LogFileDetail> allLogsFileDetails =
taskLogFileDetails.get(task);
logFileDetail = allLogsFileDetails.get(logName);
if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
&& logFileDetail.length > taskRetainSize) {
truncationNeeded = true;
break;
}
}
return truncationNeeded;
}
Example 6: getVMCommand
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Create worker attempt jvm command
* @param conf application configuration
* @param appid application id
* @param workerAttemptId worker attempt id
* @return
*/
public static List<String> getVMCommand(Configuration conf, ApplicationId appid, WorkerAttemptId workerAttemptId) {
Vector<String> vargs = new Vector<String>(8);
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
String javaOpts = getChildJavaOpts(conf, appid, workerAttemptId);
LOG.debug("javaOpts=" + javaOpts);
    for (String javaOpt : javaOpts.split(" ")) {
      vargs.add(javaOpt);
    }
Path childTmpDir = new Path(Environment.PWD.$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
vargs.add("-Djava.io.tmpdir=" + childTmpDir);
// Setup the log4j prop
long logSize = 0;
setupLog4jProperties(conf, vargs, logSize);
// Add main class and its arguments
String workerClassName =
conf.get(AngelConf.ANGEL_WORKER_CLASS,
AngelConf.DEFAULT_ANGEL_WORKER_CLASS);
vargs.add(workerClassName);
vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
    // Final command
StringBuilder mergedCommand = new StringBuilder();
for (CharSequence str : vargs) {
mergedCommand.append(str).append(" ");
}
Vector<String> vargsFinal = new Vector<String>(1);
vargsFinal.add(mergedCommand.toString());
return vargsFinal;
}
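A hedged usage sketch: the returned list is typically handed to a YARN ContainerLaunchContext when the worker container is launched. The localResources, environment, tokens, and acls variables here are hypothetical and assumed to be prepared elsewhere.

// Hypothetical call site for getVMCommand.
List<String> commands = getVMCommand(conf, appId, workerAttemptId);
ContainerLaunchContext launchContext = ContainerLaunchContext.newInstance(
    localResources, environment, commands, null, tokens, acls);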
Example 7: readTaskLog
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Reads tasklog and returns it as string after trimming it.
*
* @param filter
* Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
* @param taskId
 * The task id for which the log has to be collected
* @param isCleanup
* whether the task is a cleanup attempt or not.
* @return task log as string
* @throws IOException
*/
public static String readTaskLog(TaskLog.LogName filter,
org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
throws IOException {
// string buffer to store task log
StringBuffer result = new StringBuffer();
int res;
// reads the whole tasklog into inputstream
InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
isCleanup);
// construct string log from inputstream.
byte[] b = new byte[65536];
while (true) {
res = taskLogReader.read(b);
if (res > 0) {
      result.append(new String(b, 0, res)); // append only the bytes actually read
} else {
break;
}
}
taskLogReader.close();
// trim the string and return it
String str = result.toString();
str = str.trim();
return str;
}
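A hedged usage sketch (the attempt id and the expected substring are hypothetical): in a test, the helper makes it easy to assert on what an attempt wrote to one of its streams.

// Hypothetical test usage: inspect a finished attempt's stderr.
String stderr = readTaskLog(TaskLog.LogName.STDERR, attemptId, false);
assertTrue("expected a stack trace in stderr", stderr.contains("Exception"));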
Example 8: readTaskLog
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
public String readTaskLog(TaskLog.LogName filter,
org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
throws IOException {
// string buffer to store task log
StringBuffer result = new StringBuffer();
int res;
// reads the whole tasklog into inputstream
InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
isCleanup);
// construct string log from inputstream.
byte[] b = new byte[65536];
while (true) {
res = taskLogReader.read(b);
if (res > 0) {
      result.append(new String(b, 0, res)); // append only the bytes actually read
} else {
break;
}
}
taskLogReader.close();
// trim the string and return it
String str = result.toString();
str = str.trim();
return str;
}
Example 9: shouldTruncateLogs
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Check the log file sizes generated by the attempts that ran in a
* particular JVM
 * @param lInfo the JVM whose attempt logs are to be checked
 * @return true if any log file of the JVM's attempts needs truncation
* @throws IOException
*/
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
// Read the log-file details for all the attempts that ran in this JVM
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
try {
taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
} catch (IOException e) {
LOG.warn(
"Exception in truncateLogs while getting allLogsFileDetails()."
+ " Ignoring the truncation of logs of this process.", e);
return false;
}
File attemptLogDir = lInfo.getLogLocation();
for (LogName logName : LogName.values()) {
File logFile = new File(attemptLogDir, logName.toString());
if (logFile.exists()) {
      if (isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        return true;
      }
      LOG.debug("Truncation is not needed for " + logFile.getAbsolutePath());
}
}
return false;
}
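A hedged flow sketch tying the pieces together; it assumes this method lives on Hadoop's TaskLogsTruncater and that a companion truncateLogs(JVMInfo) method exists, which these examples do not confirm.

// Hypothetical caller: truncate only when the cheap check says so.
TaskLogsTruncater truncater = new TaskLogsTruncater(conf);
if (truncater.shouldTruncateLogs(jvmInfo)) {
  truncater.truncateLogs(jvmInfo);
}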
Example 10: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
 * Get the log-file details for all of the attempts passed.
* @param allAttempts the attempts we are interested in
*
* @return a map of task to the log-file detail
* @throws IOException
*/
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
final List<Task> allAttempts) throws IOException {
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
new HashMap<Task, Map<LogName, LogFileDetail>>();
for (Task task : allAttempts) {
Map<LogName, LogFileDetail> allLogsFileDetails;
allLogsFileDetails =
TaskLog.getAllLogsFileDetails(task.getTaskID(), task.isTaskCleanupTask());
taskLogFileDetails.put(task, allLogsFileDetails);
}
return taskLogFileDetails;
}
Example 11: updateIndicesAfterLogTruncation
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
* Truncation of logs is done. Now sync the index files to reflect the
* truncated sizes.
*
 * @param location the attempt log directory whose index files are rewritten
 * @param updatedTaskLogFileDetails the truncated per-task log-file details
*/
private void updateIndicesAfterLogTruncation(String location,
Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
for (Entry<Task, Map<LogName, LogFileDetail>> entry :
updatedTaskLogFileDetails.entrySet()) {
Task task = entry.getKey();
Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
// set current and previous lengths
for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
logLengths.put(logName, new Long[] { Long.valueOf(0L),
Long.valueOf(0L) });
LogFileDetail lfd = logFileDetails.get(logName);
if (lfd != null) {
// Set previous lengths
logLengths.get(logName)[0] = Long.valueOf(lfd.start);
// Set current lengths
logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
}
}
try {
TaskLog.writeToIndexFile(location, task.getTaskID(),
task.isTaskCleanupTask(), logLengths);
} catch (IOException ioe) {
LOG.warn("Exception encountered while updating index file of task "
+ task.getTaskID()
+ ". Ignoring and continuing with other tasks.", ioe);
}
}
}
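The Long[] values follow a {start offset, end offset} convention, so the previous length lands in slot 0 and the post-truncation end in slot 1. A short hedged illustration; the SYSLOG lookup is hypothetical.

// Recover the truncated length of a stream from the two offsets.
Long[] bounds = logLengths.get(LogName.SYSLOG);
long truncatedLength = bounds[1] - bounds[0];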
Example 12: writeBytes
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
private void writeBytes(TaskAttemptID firstAttemptID, TaskAttemptID attemptID,
LogName logName, long numBytes, boolean random, char data) throws IOException {
File logFile = TaskLog.getTaskLogFile(firstAttemptID, false, logName);
File logLocation = logFile.getParentFile();
LOG.info("Going to write " + numBytes + " real bytes to the log file "
+ logFile);
if (!logLocation.exists()
&& !logLocation.mkdirs()) {
throw new IOException("Couldn't create all ancestor dirs for "
+ logFile);
}
File attemptDir = TaskLog.getAttemptDir(attemptID, false);
if (!attemptDir.exists() && !attemptDir.mkdirs()) {
throw new IOException("Couldn't create all ancestor dirs for "
+ logFile);
}
    // Need to call this up front so that the current task id gets set.
TaskLog.syncLogs(logLocation.toString(), attemptID, false, true);
FileOutputStream outputStream = new FileOutputStream(logFile, true);
Random r = new Random();
for (long i = 0; i < numBytes; i++) {
if(random) {
outputStream.write(r.nextInt());
} else {
outputStream.write(data);
}
}
outputStream.close();
TaskLog.syncLogs(logLocation.toString(), attemptID, false, true);
LOG.info("Written " + logFile.length() + " real bytes to the log file "
+ logFile);
}
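In the enclosing test class this helper is presumably wrapped by thin convenience methods; a hedged sketch of one such wrapper (the name writeRealBytes is an assumption).

// Hypothetical convenience wrapper: fill a log with a fixed character.
private void writeRealBytes(TaskAttemptID firstAttemptID,
    TaskAttemptID attemptID, LogName logName, long numBytes, char data)
    throws IOException {
  writeBytes(firstAttemptID, attemptID, logName, numBytes, false, data);
}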
Example 13: getAllLogsFileDetails
import org.apache.hadoop.mapred.TaskLog.LogName; // import the required package/class
/**
 * Get the log-file details for all of the attempts passed.
 *
 * @param allAttempts the attempts we are interested in
* @return a map of task to the log-file detail
* @throws IOException
*/
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
final List<Task> allAttempts) throws IOException {
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
new HashMap<Task, Map<LogName, LogFileDetail>>();
for (Task task : allAttempts) {
Map<LogName, LogFileDetail> allLogsFileDetails;
allLogsFileDetails =
TaskLog.getAllLogsFileDetails(task.getTaskID(),
task.isTaskCleanupTask());
taskLogFileDetails.put(task, allLogsFileDetails);
}
return taskLogFileDetails;
}