本文整理汇总了Java中org.apache.hadoop.yarn.logaggregation.LogCLIHelpers类的典型用法代码示例。如果您正苦于以下问题:Java LogCLIHelpers类的具体用法?Java LogCLIHelpers怎么用?Java LogCLIHelpers使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
LogCLIHelpers类属于org.apache.hadoop.yarn.logaggregation包,在下文中一共展示了LogCLIHelpers类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testFailResultCodes
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; //导入依赖的package包/类
// Verifies that the log CLI surfaces failures as non-zero exit codes when
// asked to dump logs for an application or container that does not exist.
@Test(timeout = 5000L)
public void testFailResultCodes() throws Exception {
  Configuration config = new YarnConfiguration();
  config.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);

  LogCLIHelpers helpers = new LogCLIHelpers();
  helpers.setConf(config);

  YarnClient yarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI logDumper = new LogsCLIForTest(yarnClient);
  logDumper.setConf(config);

  // Dumping logs of a non-existent application must fail.
  int rc = logDumper.run(new String[] {"-applicationId", "application_0_0"});
  assertTrue("Should return an error code", rc != 0);

  // Dumping logs of a non-existent container must fail as well.
  rc = helpers.dumpAContainersLogs("application_0_0", "container_0_0",
      "nonexistentnode:1234", "nobody");
  assertTrue("Should return an error code", rc != 0);
}
示例2: dumpFullLogs
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; //导入依赖的package包/类
/**
 * Dumps the full job logs for a particular application to stdout.
 *
 * @param cfg           configuration that locates the aggregated logs and
 *                      supplies the history username property
 * @param applicationId application to dump logs for
 */
public static void dumpFullLogs(Configuration cfg, ApplicationId applicationId) {
  // TODO: Add the proper base dir settings etc...
  LogCLIHelpers helpers = new LogCLIHelpers();
  helpers.setConf(cfg);

  String owner = cfg.get(SPYDRA_HISTORY_USERNAME_PROPERTY);
  try {
    helpers.dumpAllContainersLogs(applicationId, owner, System.out);
  } catch (IOException e) {
    // Log and continue: failing to dump logs should not abort the caller.
    logger.error("Failed dumping log files for application " + applicationId, e);
  }
}
示例3: printContainerLogsFromRunningApplication
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; //导入依赖的package包/类
/**
 * Prints the logs of a container belonging to a still-running application.
 * Each requested log file is fetched over HTTP from the NodeManager's REST
 * endpoint ({@code ws/v1/node/containerlogs/<container>/<file>}); afterwards
 * any partial logs already aggregated to HDFS are appended via the helper.
 *
 * @param conf            configuration used to pick the http(s) scheme
 * @param appId           application the container belongs to
 * @param containerIdStr  container whose logs should be printed
 * @param nodeHttpAddress HTTP address of the NodeManager hosting the container
 * @param nodeId          node id used for the aggregated-log lookup
 * @param logFiles        requested log file names; if the request means "all"
 *                        (see {@code fetchAllLogFiles}) the full file list is
 *                        queried from the NodeManager
 * @param logCliHelper    helper used to dump already-aggregated logs
 * @param appOwner        user that owns the application
 * @throws IOException if dumping the aggregated logs fails
 */
private void printContainerLogsFromRunningApplication(Configuration conf,
    String appId, String containerIdStr, String nodeHttpAddress,
    String nodeId, String[] logFiles, LogCLIHelpers logCliHelper,
    String appOwner) throws IOException {
  String[] requestedLogFiles = logFiles;
  // fetch all the log files for the container
  if (fetchAllLogFiles(logFiles)) {
    requestedLogFiles =
        getContainerLogFiles(getConf(), containerIdStr, nodeHttpAddress);
  }
  Client webServiceClient = Client.create();
  try {
    String containerString = "\n\nContainer: " + containerIdStr;
    System.out.println(containerString);
    System.out.println(StringUtils.repeat("=", containerString.length()));
    for (String logFile : requestedLogFiles) {
      System.out.println("LogType:" + logFile);
      System.out.println("Log Upload Time:"
          + Times.format(System.currentTimeMillis()));
      System.out.println("Log Contents:");
      try {
        WebResource webResource =
            webServiceClient.resource(WebAppUtils.getHttpSchemePrefix(conf)
                + nodeHttpAddress);
        ClientResponse response =
            webResource.path("ws").path("v1").path("node")
                .path("containerlogs").path(containerIdStr).path(logFile)
                .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
        System.out.println(response.getEntity(String.class));
        System.out.println("End of LogType:" + logFile);
      } catch (ClientHandlerException | UniformInterfaceException ex) {
        // Best effort: report and move on to the next log file.
        System.out.println("Can not find the log file:" + logFile
            + " for the container:" + containerIdStr + " in NodeManager:"
            + nodeId);
      }
    }
  } finally {
    // FIX: the original leaked the Jersey client on every call; destroy it
    // so its underlying connection resources are released.
    webServiceClient.destroy();
  }
  // for the case, we have already uploaded partial logs in HDFS
  logCliHelper.dumpAContainersLogsForALogType(appId, containerIdStr, nodeId,
      appOwner, Arrays.asList(requestedLogFiles));
}
示例4: printContainerLogsForFinishedApplication
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; //导入依赖的package包/类
/**
 * Prints a container's logs for a finished application by reading the
 * aggregated logs through the {@link LogCLIHelpers}. A {@code null}
 * {@code logFiles} array requests every available log type.
 */
private void printContainerLogsForFinishedApplication(String appId,
    String containerId, String nodeAddress, String[] logFiles,
    LogCLIHelpers logCliHelper, String appOwner) throws IOException {
  String header = "\n\nContainer: " + containerId;
  String separator = StringUtils.repeat("=", header.length());
  System.out.println(header);
  System.out.println(separator);

  List<String> requested = (logFiles == null) ? null : Arrays.asList(logFiles);
  logCliHelper.dumpAContainersLogsForALogType(appId, containerId,
      nodeAddress, appOwner, requested);
}
示例5: outputAMContainerLogs
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; //导入依赖的package包/类
/**
 * Prints the logs of an application's ApplicationMaster container,
 * dispatching to the aggregated-log path for finished applications and to
 * the NodeManager REST path for running ones.
 *
 * @param request      describes the AM container (id, node id, node http address)
 * @param conf         configuration used for the running-application HTTP fetch
 * @param appId        application the AM container belongs to
 * @param logFiles     requested log file names (may mean "all")
 * @param logCliHelper helper used to dump aggregated logs
 * @param appOwner     user that owns the application
 * @throws Exception if fetching or dumping the logs fails
 */
private void outputAMContainerLogs(AMLogsRequest request, Configuration conf,
    String appId, String[] logFiles, LogCLIHelpers logCliHelper,
    String appOwner) throws Exception {
  if (request.isAppFinished()) {
    outputFinishedAMContainerLogs(request, appId, logFiles, logCliHelper,
        appOwner);
  } else {
    outputRunningAMContainerLogs(request, conf, appId, logFiles, logCliHelper,
        appOwner);
  }
}

/**
 * Dumps the AM container logs of a finished application from the aggregated
 * logs. Silently does nothing when the container or its node cannot be
 * determined (matching the original nested-if behavior).
 */
private void outputFinishedAMContainerLogs(AMLogsRequest request, String appId,
    String[] logFiles, LogCLIHelpers logCliHelper, String appOwner)
    throws Exception {
  String containerId = request.getAmContainerId();
  if (containerId == null || containerId.isEmpty()) {
    return;
  }
  String nodeId = request.getNodeId();
  if (nodeId == null || nodeId.isEmpty()) {
    // The request did not carry the node; look it up from the container report.
    try {
      nodeId = getContainerReport(containerId).getAssignedNode().toString();
    } catch (Exception ex) {
      System.err.println(ex);
      nodeId = null;
    }
  }
  if (nodeId == null || nodeId.isEmpty()) {
    return;
  }
  // A null file list means "all log types" on the finished-application path.
  String[] requestedLogFilesList = null;
  if (!fetchAllLogFiles(logFiles)) {
    requestedLogFilesList = logFiles;
  }
  printContainerLogsForFinishedApplication(appId, containerId, nodeId,
      requestedLogFilesList, logCliHelper, appOwner);
}

/**
 * Fetches the AM container logs of a running application directly from its
 * NodeManager. Silently does nothing when the container id or node http
 * address is missing (matching the original nested-if behavior).
 */
private void outputRunningAMContainerLogs(AMLogsRequest request,
    Configuration conf, String appId, String[] logFiles,
    LogCLIHelpers logCliHelper, String appOwner) throws Exception {
  String nodeHttpAddress = request.getNodeHttpAddress();
  String containerId = request.getAmContainerId();
  if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()
      || containerId == null || containerId.isEmpty()) {
    return;
  }
  String[] requestedLogFiles = logFiles;
  // fetch all the log files for the AM
  if (fetchAllLogFiles(logFiles)) {
    requestedLogFiles =
        getContainerLogFiles(getConf(), containerId, nodeHttpAddress);
  }
  printContainerLogsFromRunningApplication(conf, appId, containerId,
      nodeHttpAddress, request.getNodeId(), requestedLogFiles, logCliHelper,
      appOwner);
}