This article collects typical usage examples of the Java class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey. If you are wondering what the LogKey class does and how to use it, the curated examples below should help.
The LogKey class belongs to the org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat package. Eleven code examples are shown below, sorted by popularity.
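Before the examples, a minimal write-side sketch of where LogKey fits: a key is built from a ContainerId and appended to an aggregated log file together with a LogValue, mirroring the writer-side examples below. This is a hedged sketch against the Hadoop 2.x LogWriter API; the helper name aggregateOneContainer and its parameters are hypothetical.

// Hedged sketch: append one container's local logs under its LogKey.
// aggregateOneContainer and its parameters are hypothetical.
static void aggregateOneContainer(Configuration conf, Path remoteAppLogFile,
    ContainerId containerId, String localLogDir) throws Exception {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter writer = new LogWriter(conf, remoteAppLogFile, ugi);
  try {
    LogKey key = new LogKey(containerId);         // the key is the container id
    LogValue value = new LogValue(
        Collections.singletonList(localLogDir),   // dirs scanned for log files
        containerId, ugi.getShortUserName());
    writer.append(key, value);                    // one TFile entry per container
  } finally {
    writer.close();
  }
}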
Example 1: next
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
/**
 * Read the next key and return the value-stream.
 *
 * @param key
 * @return the valueStream if there are more keys or null otherwise.
 * @throws IOException
 */
public DataInputStream next(LogKey key) throws IOException {
  if (!this.atBeginning) {
    this.scanner.advance();
  } else {
    this.atBeginning = false;
  }
  if (this.scanner.atEnd()) {
    return null;
  }
  TFile.Reader.Scanner.Entry entry = this.scanner.entry();
  key.readFields(entry.getKeyStream());
  // Skip META keys
  if (RESERVED_KEYS.containsKey(key.toString())) {
    return next(key);
  }
  DataInputStream valueStream = entry.getValueStream();
  return valueStream;
}
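A hedged usage sketch of the contract above: the caller passes a mutable LogKey that each call to next() fills in, and iterates until null signals end-of-file. The file path below is a hypothetical placeholder.

// Usage sketch for next(); the path is a hypothetical placeholder.
LogReader reader = new LogReader(new Configuration(),
    new Path("/tmp/aggregatedLogFile"));
LogKey key = new LogKey();                 // empty; next() populates it
DataInputStream valueStream = reader.next(key);
while (valueStream != null) {
  // LogKey.toString() yields the container id the entry belongs to
  System.out.println("container: " + key);
  valueStream = reader.next(key);
}
reader.close();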
Example 2: getContainerLogsReader
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
/**
 * Get a ContainerLogsReader to read the logs for
 * the specified container.
 *
 * @param containerId
 * @return object to read the container's logs or null if the
 *         logs could not be found
 * @throws IOException
 */
public ContainerLogsReader getContainerLogsReader(
    ContainerId containerId) throws IOException {
  ContainerLogsReader logReader = null;
  final LogKey containerKey = new LogKey(containerId);
  LogKey key = new LogKey();
  DataInputStream valueStream = next(key);
  while (valueStream != null && !key.equals(containerKey)) {
    valueStream = next(key);
  }
  if (valueStream != null) {
    logReader = new ContainerLogsReader(valueStream);
  }
  return logReader;
}
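For context, a hedged sketch of how this method is typically used: each aggregated entry can contain several log files (stdout, stderr, ...), and ContainerLogsReader.nextLog() advances across them. The reader and containerId variables are assumed to come from the surrounding code.

// Hedged usage sketch; reader and containerId are assumed to exist.
ContainerLogsReader logs = reader.getContainerLogsReader(containerId);
if (logs != null) {
  String logType = logs.nextLog();          // null once all log files are read
  while (logType != null) {
    System.out.println("log file: " + logType
        + " (" + logs.getCurrentLogLength() + " bytes)");
    logType = logs.nextLog();
  }
}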
Example 3: testForCorruptedAggregatedLogs
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
@Test
public void testForCorruptedAggregatedLogs() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());
  long numChars = 950000;
  writeSrcFileAndALog(srcFilePath, "stdout", numChars, remoteAppLogFile,
      srcFileRoot, testContainerId);
  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  try {
    LogReader.readAcontainerLogs(dis, writer);
  } catch (Exception e) {
    if (e.toString().contains("NumberFormatException")) {
      Assert.fail("Aggregated logs are corrupted.");
    }
  }
}
Example 4: doContainerLogAggregation
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
public Set<Path> doContainerLogAggregation(LogWriter writer,
    boolean appFinished) {
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirsForRead()));
  final LogKey logKey = new LogKey(containerId);
  final LogValue logValue =
      new LogValue(dirsHandler.getLogDirsForRead(), containerId,
          userUgi.getShortUserName(), logAggregationContext,
          this.uploadedFileMeta, appFinished);
  try {
    writer.append(logKey, logValue);
  } catch (Exception e) {
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.", e);
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(logValue
      .getCurrentUpLoadedFileMeta());
  // if any of the previous uploaded logs have been deleted,
  // we need to remove them from alreadyUploadedLogs
  Iterable<String> mask =
      Iterables.filter(uploadedFileMeta, new Predicate<String>() {
        @Override
        public boolean apply(String next) {
          return logValue.getAllExistingFilesMeta().contains(next);
        }
      });
  this.uploadedFileMeta = Sets.newHashSet(mask);
  return logValue.getCurrentUpLoadedFilesPath();
}
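The Guava mask at the end of this example is simply a set intersection: keep only the previously uploaded file entries that still exist on disk. A plain-Java equivalent, assuming the same uploadedFileMeta and logValue fields:

// Equivalent of the Guava filter above: intersect the uploaded-file metadata
// with the set of files that still exist locally.
Set<String> stillExisting = new HashSet<String>(this.uploadedFileMeta);
stillExisting.retainAll(logValue.getAllExistingFilesMeta());
this.uploadedFileMeta = stillExisting;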
Example 5: serviceStart
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
@Override
public void serviceStart() throws Exception {
  try {
    // Get user's FileSystem credentials
    final UserGroupInformation userUgi = UserGroupInformation
        .createRemoteUser(user);
    createAppDir(user, applicationId, userUgi, conf,
        remoteNodeTmpLogFileForApp);
    Path remoteNodeLogFileForApp = getRemoteNodeLogFileForApp(conf,
        remoteNodeTmpLogFileForApp,
        ConverterUtils.toApplicationId(applicationId), user);
    LogWriter writer = new LogWriter(conf, remoteNodeLogFileForApp, userUgi);
    List<ContainerId> containers = getAllContainers(applicationId, conf);
    LOG.info("Starting log aggregation for containers: " + containers);
    String[] hpcLogDir = HPCConfiguration.getHPCLogDirs(conf);
    List<String> logDirs = Arrays.asList(hpcLogDir);
    for (ContainerId containerId : containers) {
      LogKey logKey = new LogKey(containerId);
      LogValue logValue = new LogValue(logDirs, containerId,
          userUgi.getShortUserName());
      writer.append(logKey, logValue);
    }
    writer.close();
    LOG.info("Log aggregation has completed.");
    // Remove the log files from local dirs
    delete(applicationId, hpcLogDir);
    // Clean up container work dirs
    delete(applicationId, HPCConfiguration.getHPCLocalDirs(conf));
  } catch (Throwable e) {
    throw new RuntimeException("Failed to complete aggregation on "
        + hostName + " for application " + applicationId, e);
  }
  super.serviceStart();
}
Example 6: doContainerLogAggregation
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
public Set<Path> doContainerLogAggregation(LogWriter writer) {
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirs()));
  final LogKey logKey = new LogKey(containerId);
  final LogValue logValue =
      new LogValue(dirsHandler.getLogDirs(), containerId,
          userUgi.getShortUserName(), logAggregationContext,
          this.uploadedFileMeta);
  try {
    writer.append(logKey, logValue);
  } catch (Exception e) {
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.");
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(logValue
      .getCurrentUpLoadedFileMeta());
  // if any of the previous uploaded logs have been deleted,
  // we need to remove them from alreadyUploadedLogs
  Iterable<String> mask =
      Iterables.filter(uploadedFileMeta, new Predicate<String>() {
        @Override
        public boolean apply(String next) {
          return logValue.getAllExistingFilesMeta().contains(next);
        }
      });
  this.uploadedFileMeta = Sets.newHashSet(mask);
  return logValue.getCurrentUpLoadedFilesPath();
}
Example 7: doContainerLogAggregation
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
public Set<Path> doContainerLogAggregation(LogWriter writer,
    boolean appFinished, boolean containerFinished) {
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirsForRead()));
  final LogKey logKey = new LogKey(containerId);
  final LogValue logValue =
      new LogValue(dirsHandler.getLogDirsForRead(), containerId,
          userUgi.getShortUserName(), logAggregationContext,
          this.uploadedFileMeta, appFinished, containerFinished, userFolder);
  try {
    writer.append(logKey, logValue);
  } catch (Exception e) {
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.", e);
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(logValue
      .getCurrentUpLoadedFileMeta());
  // if any of the previous uploaded logs have been deleted,
  // we need to remove them from alreadyUploadedLogs
  Iterable<String> mask =
      Iterables.filter(uploadedFileMeta, new Predicate<String>() {
        @Override
        public boolean apply(String next) {
          return logValue.getAllExistingFilesMeta().contains(next);
        }
      });
  this.uploadedFileMeta = Sets.newHashSet(mask);
  return logValue.getCurrentUpLoadedFilesPath();
}
Example 8: writeSrcFileAndALog
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
private void writeSrcFileAndALog(Path srcFilePath, String fileName,
    final long length, Path remoteAppLogFile, Path srcFileRoot,
    ContainerId testContainerId) throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName()));
  final CountDownLatch latch = new CountDownLatch(1);
  Thread t = new Thread() {
    public void run() {
      try {
        for (int i = 0; i < length / 3; i++) {
          osw.write(ch);
        }
        latch.countDown();
        for (int i = 0; i < (2 * length) / 3; i++) {
          osw.write(ch);
        }
        osw.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  };
  t.start();
  // Wait till the osw is partially written;
  // aggregation starts once the osw has completed 1/3rd of its work
  latch.await();
  // Aggregate the logs
  logWriter.append(logKey, logValue);
  logWriter.close();
}
Example 9: testReadAcontainerLogs1
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
@Test
public void testReadAcontainerLogs1() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());
  int numChars = 80000;
  writeSrcFile(srcFilePath, "stdout", numChars);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName());
  logWriter.append(logKey, logValue);
  logWriter.closeWriter();
  // make sure permissions are correct on the file
  FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",
      FsPermission.createImmutable((short) 0640), fsStatus.getPermission());
  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  LogReader.readAcontainerLogs(dis, writer);
  String s = writer.toString();
  int expectedLength =
      "\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length()
          + "\nLog Contents:\n".length() + numChars;
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("LogLength not matched",
      s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < numChars; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  Assert.assertEquals(expectedLength, s.length());
}
Example 10: writeSrcFileAndALog
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
private void writeSrcFileAndALog(Path srcFilePath, String fileName,
    final long length, Path remoteAppLogFile, Path srcFileRoot,
    ContainerId testContainerId) throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  try (LogWriter logWriter = new LogWriter()) {
    logWriter.initialize(conf, remoteAppLogFile, ugi);
    LogKey logKey = new LogKey(testContainerId);
    LogValue logValue =
        spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
            testContainerId, ugi.getShortUserName(), ugi.getShortUserName()));
    final CountDownLatch latch = new CountDownLatch(1);
    Thread t = new Thread() {
      public void run() {
        try {
          for (int i = 0; i < length / 3; i++) {
            osw.write(ch);
          }
          latch.countDown();
          for (int i = 0; i < (2 * length) / 3; i++) {
            osw.write(ch);
          }
          osw.close();
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    };
    t.start();
    // Wait till the osw is partially written;
    // aggregation starts once the osw has completed 1/3rd of its work
    latch.await();
    // Aggregate the logs
    logWriter.append(logKey, logValue);
  }
}
Example 11: testReadAcontainerLogs1
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; // import the required package/class
@Test
public void testReadAcontainerLogs1() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());
  int numChars = 80000;
  writeSrcFile(srcFilePath, "stdout", numChars);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName());
  logWriter.append(logKey, logValue);
  logWriter.close();
  // make sure permissions are correct on the file
  FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",
      FsPermission.createImmutable((short) 0640), fsStatus.getPermission());
  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  LogReader.readAcontainerLogs(dis, writer);
  String s = writer.toString();
  int expectedLength =
      "\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length()
          + "\nLog Contents:\n".length() + numChars;
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("LogLength not matched",
      s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < numChars; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  Assert.assertEquals(expectedLength, s.length());
}