当前位置: 首页>>代码示例>>Java>>正文


Java LogWriter类代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter的典型用法代码示例。如果您正苦于以下问题:Java LogWriter类的具体用法?Java LogWriter怎么用?Java LogWriter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


LogWriter类属于org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat包,在下文中一共展示了LogWriter类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: doContainerLogAggregation

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
public Set<Path> doContainerLogAggregation(LogWriter writer,
    boolean appFinished) {
  // Append this container's logs to the aggregated log file and return
  // the paths of the local log files uploaded in this pass.
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirsForRead()));
  final LogKey key = new LogKey(containerId);
  final LogValue value =
      new LogValue(dirsHandler.getLogDirsForRead(), containerId,
        userUgi.getShortUserName(), logAggregationContext,
        this.uploadedFileMeta, appFinished);
  try {
    writer.append(key, value);
  } catch (Exception e) {
    // Upload failed for this container only; report nothing uploaded.
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.", e);
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(value.getCurrentUpLoadedFileMeta());
  // If any previously uploaded log file has since been deleted on disk,
  // drop it from the already-uploaded bookkeeping set.
  Set<String> stillExisting = new HashSet<String>();
  for (String fileMeta : this.uploadedFileMeta) {
    if (value.getAllExistingFilesMeta().contains(fileMeta)) {
      stillExisting.add(fileMeta);
    }
  }
  this.uploadedFileMeta = stillExisting;
  return value.getCurrentUpLoadedFilesPath();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:AppLogAggregatorImpl.java

示例2: serviceStart

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
@Override
public void serviceStart() throws Exception {
  try {
    // Run the aggregation as the application submitter so the
    // aggregated file is written with the user's FileSystem credentials.
    final UserGroupInformation userUgi = UserGroupInformation
        .createRemoteUser(user);

    createAppDir(user, applicationId, userUgi, conf,
        remoteNodeTmpLogFileForApp);

    Path remoteNodeLogFileForApp = getRemoteNodeLogFileForApp(conf,
        remoteNodeTmpLogFileForApp,
        ConverterUtils.toApplicationId(applicationId), user);
    String[] hpcLogDir = HPCConfiguration.getHPCLogDirs(conf);
    LogWriter writer = new LogWriter(conf, remoteNodeLogFileForApp, userUgi);
    try {
      List<ContainerId> containers = getAllContainers(applicationId, conf);
      LOG.info("Starting Log aggregation for containers : " + containers);
      List<String> logDirs = Arrays.asList(hpcLogDir);
      // Append one (key, value) entry per container to the aggregated file.
      for (ContainerId containerId : containers) {
        LogKey logKey = new LogKey(containerId);
        LogValue logValue = new LogValue(logDirs, containerId,
            userUgi.getShortUserName());
        writer.append(logKey, logValue);
      }
    } finally {
      // Always close the writer; previously it leaked (and left the remote
      // file open) whenever append() threw part-way through.
      writer.close();
    }
    LOG.info("Log aggregation has completed.");

    // Remove the log files from local dir's
    delete(applicationId, hpcLogDir);

    // Clean up container work dirs
    delete(applicationId, HPCConfiguration.getHPCLocalDirs(conf));

  } catch (Throwable e) {
    // Fixed message: a space was missing between the host name and
    // "for application", producing e.g. "on host1for application ...".
    throw new RuntimeException("Failed to complete aggregation on "
        + hostName + " for application " + applicationId, e);
  }
  super.serviceStart();
}
 
开发者ID:intel-hpdd,项目名称:scheduling-connector-for-hadoop,代码行数:40,代码来源:HPCLogAggregateHandler.java

示例3: doContainerLogAggregation

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
/**
 * Appends this container's logs to the aggregated log file and returns
 * the paths of the local log files uploaded in this pass. On failure the
 * container is skipped and an empty set is returned.
 */
public Set<Path> doContainerLogAggregation(LogWriter writer) {
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirs()));
  final LogKey logKey = new LogKey(containerId);
  final LogValue logValue =
      new LogValue(dirsHandler.getLogDirs(), containerId,
        userUgi.getShortUserName(), logAggregationContext,
        this.uploadedFileMeta);
  try {
    writer.append(logKey, logValue);
  } catch (Exception e) {
    // Fixed: pass the exception to the logger — previously the cause and
    // stack trace were silently dropped, making upload failures
    // impossible to diagnose (the parallel variants already log it).
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.", e);
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(logValue
    .getCurrentUpLoadedFileMeta());
  // if any of the previous uploaded logs have been deleted,
  // we need to remove them from alreadyUploadedLogs
  Iterable<String> mask =
      Iterables.filter(uploadedFileMeta, new Predicate<String>() {
        @Override
        public boolean apply(String next) {
          return logValue.getAllExistingFilesMeta().contains(next);
        }
      });

  this.uploadedFileMeta = Sets.newHashSet(mask);
  return logValue.getCurrentUpLoadedFilesPath();
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:32,代码来源:AppLogAggregatorImpl.java

示例4: doContainerLogAggregation

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
public Set<Path> doContainerLogAggregation(LogWriter writer,
    boolean appFinished, boolean containerFinished) {
  // Append this container's logs to the aggregated log file and return
  // the paths of the local log files uploaded in this pass.
  LOG.info("Uploading logs for container " + containerId
      + ". Current good log dirs are "
      + StringUtils.join(",", dirsHandler.getLogDirsForRead()));
  final LogKey key = new LogKey(containerId);
  final LogValue value =
      new LogValue(dirsHandler.getLogDirsForRead(), containerId,
          userUgi.getShortUserName(), logAggregationContext,
          this.uploadedFileMeta, appFinished, containerFinished, userFolder);
  try {
    writer.append(key, value);
  } catch (Exception e) {
    // Upload failed for this container only; report nothing uploaded.
    LOG.error("Couldn't upload logs for " + containerId
        + ". Skipping this container.", e);
    return new HashSet<Path>();
  }
  this.uploadedFileMeta.addAll(value.getCurrentUpLoadedFileMeta());
  // If any previously uploaded log file has since been deleted on disk,
  // drop it from the already-uploaded bookkeeping set.
  Set<String> stillExisting = new HashSet<String>();
  for (String fileMeta : this.uploadedFileMeta) {
    if (value.getAllExistingFilesMeta().contains(fileMeta)) {
      stillExisting.add(fileMeta);
    }
  }
  this.uploadedFileMeta = stillExisting;
  return value.getCurrentUpLoadedFilesPath();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:33,代码来源:AppLogAggregatorImpl.java

示例5: writeSrcFileAndALog

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
/**
 * Test helper: writes {@code length} filler characters to a source log
 * file from a background thread while concurrently aggregating that file
 * with a {@link LogWriter}, so aggregation starts on a partially written
 * log (the writer thread releases a latch after 1/3 of the content).
 */
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
    Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
    throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }

  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId);
  // spy() so the caller can verify interactions on the LogValue.
  LogValue logValue =
      spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName()));

  final CountDownLatch latch = new CountDownLatch(1);

  // Writer thread: emits 1/3 of the content, releases the latch so
  // aggregation can begin, then writes the remaining 2/3 and closes.
  Thread t = new Thread() {
    public void run() {
      try {
        for(int i=0; i < length/3; i++) {
            osw.write(ch);
        }

        latch.countDown();

        for(int i=0; i < (2*length)/3; i++) {
          osw.write(ch);
        }
        osw.close();
      } catch (IOException e) {
        // Best-effort test helper: just surface the failure on stderr.
        e.printStackTrace();
      }
    }
  };
  t.start();

  //Wait till the osw is partially written
  //aggregation starts once the ows has completed 1/3rd of its work
  latch.await();

  //Aggregate The Logs
  logWriter.append(logKey, logValue);
  logWriter.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:55,代码来源:TestAggregatedLogFormat.java

示例6: testReadAcontainerLogs1

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
/**
 * Writes a container log of 80000 filler chars, aggregates it with a
 * {@link LogWriter}, then reads the aggregated file back and verifies
 * file permissions, the LogType/LogLength/Log Contents headers, the
 * content itself, and the exact total length of the read-back output.
 */
@Test
public void testReadAcontainerLogs1() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  // Source layout mirrors production: <root>/<appId>/<containerId>/stdout
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());

  int numChars = 80000;

  writeSrcFile(srcFilePath, "stdout", numChars);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName());

  logWriter.append(logKey, logValue);
  logWriter.closeWriter();

  // make sure permission are correct on the file
  FileStatus fsStatus =  fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",  
    FsPermission.createImmutable((short) 0640), fsStatus.getPermission()); 

  // Read the single aggregated entry back into a string.
  // NOTE(review): logReader/dis are never closed — tolerable in a test,
  // but it does leak the underlying file handle.
  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  LogReader.readAcontainerLogs(dis, writer);
  
  String s = writer.toString();
  // Expected output shape: "\n\nLogType:stdout\nLogLength:<n>\nLog Contents:\n<data>"
  int expectedLength =
      "\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length()
          + "\nLog Contents:\n".length() + numChars;
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  
  StringBuilder sb = new StringBuilder();
  for (int i = 0 ; i < numChars ; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  
  Assert.assertEquals(expectedLength, s.length());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:57,代码来源:TestAggregatedLogFormat.java

示例7: writeSrcFileAndALog

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
/**
 * Test helper: writes {@code length} filler characters to a source log
 * file from a background thread while concurrently aggregating that file
 * with a {@link LogWriter}, so aggregation starts on a partially written
 * log. This variant manages the LogWriter with try-with-resources.
 */
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
    Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
    throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }

  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // LogWriter is AutoCloseable in this API version; closed automatically.
  try (LogWriter logWriter = new LogWriter()) {
    logWriter.initialize(conf, remoteAppLogFile, ugi);

    LogKey logKey = new LogKey(testContainerId);
    // spy() so the caller can verify interactions on the LogValue.
    LogValue logValue =
        spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
            testContainerId, ugi.getShortUserName(),ugi.getShortUserName()));

    final CountDownLatch latch = new CountDownLatch(1);

    // Writer thread: emits 1/3 of the content, releases the latch so
    // aggregation can begin, then writes the remaining 2/3 and closes.
    Thread t = new Thread() {
      public void run() {
        try {
          for (int i = 0; i < length / 3; i++) {
            osw.write(ch);
          }

          latch.countDown();

          for (int i = 0; i < (2 * length) / 3; i++) {
            osw.write(ch);
          }
          osw.close();
        } catch (IOException e) {
          // Best-effort test helper: just surface the failure on stderr.
          e.printStackTrace();
        }
      }
    };
    t.start();

    //Wait till the osw is partially written
    //aggregation starts once the ows has completed 1/3rd of its work
    latch.await();

    //Aggregate The Logs
    logWriter.append(logKey, logValue);
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:56,代码来源:TestAggregatedLogFormat.java

示例8: testReadAcontainerLogs1

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter; //导入依赖的package包/类
/**
 * Writes a container log of 80000 filler chars, aggregates it with a
 * {@link LogWriter}, then reads the aggregated file back and verifies
 * file permissions, the LogType/LogLength/Log Contents headers, the
 * content itself, and the exact total length of the read-back output.
 */
@Test
public void testReadAcontainerLogs1() throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  // Source layout mirrors production: <root>/<appId>/<containerId>/stdout
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());

  int numChars = 80000;

  writeSrcFile(srcFilePath, "stdout", numChars);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName());

  logWriter.append(logKey, logValue);
  logWriter.close();

  // make sure permission are correct on the file
  FileStatus fsStatus =  fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",  
    FsPermission.createImmutable((short) 0640), fsStatus.getPermission()); 

  // Read the single aggregated entry back into a string.
  // NOTE(review): logReader/dis are never closed — tolerable in a test,
  // but it does leak the underlying file handle.
  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();
  LogReader.readAcontainerLogs(dis, writer);
  
  String s = writer.toString();
  // Expected output shape: "\n\nLogType:stdout\nLogLength:<n>\nLog Contents:\n<data>"
  int expectedLength =
      "\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length()
          + "\nLog Contents:\n".length() + numChars;
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  
  StringBuilder sb = new StringBuilder();
  for (int i = 0 ; i < numChars ; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  
  Assert.assertEquals(expectedLength, s.length());
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:57,代码来源:TestAggregatedLogFormat.java


注:本文中的org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。