

Java LogAggregationUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.yarn.logaggregation.LogAggregationUtils. If you are looking for concrete answers to questions such as what LogAggregationUtils does, how to use it, or what working examples look like, the selected snippets below should help.


The LogAggregationUtils class belongs to the org.apache.hadoop.yarn.logaggregation package. Twelve code examples of the class are shown below, ordered by popularity by default.
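Before the examples, here is a minimal, self-contained sketch of how the LogAggregationUtils path helpers are typically combined. It is based on the helper signatures used in the examples below, assumes the default remote log root (/tmp/logs) and suffix (logs), and uses a hypothetical user, application id, and node id purely for illustration:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;

public class LogAggregationUtilsDemo {
  public static void main(String[] args) {
    // Hypothetical values for illustration only
    Path remoteRootLogDir = new Path("/tmp/logs");  // assumed default of yarn.nodemanager.remote-app-log-dir
    String suffix = "logs";                         // assumed default of yarn.nodemanager.remote-app-log-dir-suffix
    String user = "alice";
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    NodeId nodeId = NodeId.newInstance("worker-01", 45454);

    // Per-application directory: <root>/<user>/<suffix>/<appId>
    Path appDir = LogAggregationUtils.getRemoteAppLogDir(
        remoteRootLogDir, appId, user, suffix);

    // Per-node aggregated log file inside the application directory
    Path nodeLogFile = LogAggregationUtils.getRemoteNodeLogFileForApp(
        remoteRootLogDir, appId, user, nodeId, suffix);

    System.out.println("App log dir:   " + appDir);
    System.out.println("Node log file: " + nodeLogFile);
    System.out.println("Node string:   " + LogAggregationUtils.getNodeString(nodeId));
  }
}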

Example 1: uploadContainerLogIntoRemoteDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi,
    Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
    ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
  Path path =
      new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
          + System.currentTimeMillis());
  AggregatedLogFormat.LogWriter writer =
      new AggregatedLogFormat.LogWriter(configuration, path, ugi);
  writer.writeApplicationOwner(ugi.getUserName());

  Map<ApplicationAccessType, String> appAcls =
      new HashMap<ApplicationAccessType, String>();
  appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
  writer.writeApplicationACLs(appAcls);
  writer.append(new AggregatedLogFormat.LogKey(containerId),
    new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
      UserGroupInformation.getCurrentUser().getShortUserName()));
  writer.close();
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestLogsCLI.java

Example 2: uploadEmptyContainerLogIntoRemoteDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
    Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
    ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
  Path path =
      new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
          + System.currentTimeMillis());
  AggregatedLogFormat.LogWriter writer =
      new AggregatedLogFormat.LogWriter(configuration, path, ugi);
  writer.writeApplicationOwner(ugi.getUserName());

  Map<ApplicationAccessType, String> appAcls =
      new HashMap<ApplicationAccessType, String>();
  appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
  writer.writeApplicationACLs(appAcls);
  DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
  new AggregatedLogFormat.LogKey(containerId).write(out);
  out.close();
  out = writer.getWriter().prepareAppendValue(-1);
  new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
    UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
    new HashSet<File>());
  out.close();
  writer.close();
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestLogsCLI.java

Example 3: createAppDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private Path createAppDir(String user, String applicationId,
    UserGroupInformation userUgi, Configuration conf,
    Path remoteNodeTmpLogFileForApp) throws IOException {
  FileSystem remoteFS = FileSystem.get(conf);

  // Only creating directories if they are missing to avoid
  // unnecessary load on the filesystem from all of the nodes
  String remoteRootLogDirSuffix = conf.get(
      YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
      YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
  Path appDir = LogAggregationUtils.getRemoteAppLogDir(
      remoteNodeTmpLogFileForApp,
      ConverterUtils.toApplicationId(applicationId), user,
      remoteRootLogDirSuffix);
  appDir = appDir.makeQualified(remoteFS.getUri(),
      remoteFS.getWorkingDirectory());

  if (!checkExists(remoteFS, appDir, APP_DIR_PERMISSIONS)) {
    Path suffixDir = LogAggregationUtils.getRemoteLogSuffixedDir(
        remoteNodeTmpLogFileForApp, user, remoteRootLogDirSuffix);
    suffixDir = suffixDir.makeQualified(remoteFS.getUri(),
        remoteFS.getWorkingDirectory());

    if (!checkExists(remoteFS, suffixDir, APP_DIR_PERMISSIONS)) {
      Path userDir = LogAggregationUtils.getRemoteLogUserDir(
          remoteNodeTmpLogFileForApp, user);
      userDir = userDir.makeQualified(remoteFS.getUri(),
          remoteFS.getWorkingDirectory());

      if (!checkExists(remoteFS, userDir, APP_DIR_PERMISSIONS)) {
        createDir(remoteFS, userDir, APP_DIR_PERMISSIONS);
      }

      createDir(remoteFS, suffixDir, APP_DIR_PERMISSIONS);
    }

    createDir(remoteFS, appDir, APP_DIR_PERMISSIONS);
  }
  return appDir;
}
 
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 41, Source: HPCLogAggregateHandler.java
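For orientation, the three checkExists/createDir levels above correspond to a three-level hierarchy on the remote filesystem. A sketch of the resulting paths, assuming the default remote log root (/tmp/logs) and suffix (logs) and a hypothetical user and application id:

/tmp/logs/alice                                        // user dir   (getRemoteLogUserDir)
/tmp/logs/alice/logs                                   // suffix dir (getRemoteLogSuffixedDir)
/tmp/logs/alice/logs/application_1450000000000_0001    // app dir    (getRemoteAppLogDir)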

Example 4: uploadContainerLogIntoRemoteDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi,
    Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
    ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
  Path path =
      new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
          + System.currentTimeMillis());
  try (AggregatedLogFormat.LogWriter writer =
           new AggregatedLogFormat.LogWriter()) {
    writer.initialize(configuration, path, ugi);
    writer.writeApplicationOwner(ugi.getUserName());

    Map<ApplicationAccessType, String> appAcls = new HashMap<>();
    appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
    writer.writeApplicationACLs(appAcls);
    writer.append(new AggregatedLogFormat.LogKey(containerId),
        new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
            UserGroupInformation.getCurrentUser().getShortUserName(),
            UserGroupInformation.getCurrentUser().getShortUserName()));
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: TestLogsCLI.java

Example 5: uploadEmptyContainerLogIntoRemoteDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
    Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
    ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
  Path path =
      new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
          + System.currentTimeMillis());
  try (AggregatedLogFormat.LogWriter writer =
           new AggregatedLogFormat.LogWriter()) {
    writer.initialize(configuration, path, ugi);
    writer.writeApplicationOwner(ugi.getUserName());

    Map<ApplicationAccessType, String> appAcls = new HashMap<>();
    appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
    writer.writeApplicationACLs(appAcls);
    DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
    new AggregatedLogFormat.LogKey(containerId).write(out);
    out.close();
    out = writer.getWriter().prepareAppendValue(-1);
    new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
        UserGroupInformation.getCurrentUser().getShortUserName(),
        UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
        new HashSet<File>());
    out.close();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: TestLogsCLI.java

Example 6: numOfLogsAvailable

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private int numOfLogsAvailable(LogAggregationService logAggregationService,
    ApplicationId appId, boolean sizeLimited, String lastLogFile)
    throws IOException {
  Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user);
  RemoteIterator<FileStatus> nodeFiles = null;
  try {
    Path qualifiedLogDir =
        FileContext.getFileContext(this.conf).makeQualified(appLogDir);
    nodeFiles =
        FileContext.getFileContext(qualifiedLogDir.toUri(), this.conf)
          .listStatus(appLogDir);
  } catch (FileNotFoundException fnf) {
    return -1;
  }
  int count = 0;
  while (nodeFiles.hasNext()) {
    FileStatus status = nodeFiles.next();
    String filename = status.getPath().getName();
    if (filename.contains(LogAggregationUtils.TMP_FILE_SUFFIX)
        || (lastLogFile != null && filename.contains(lastLogFile)
            && sizeLimited)) {
      return -1;
    }
    if (filename.contains(LogAggregationUtils
      .getNodeString(logAggregationService.getNodeId()))) {
      count++;
    }
  }
  return count;
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestLogAggregationService.java

Example 7: getRemoteNodeLogFileForApp

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private Path getRemoteNodeLogFileForApp(Configuration conf,
    Path remoteNodeTmpLogFileForApp, ApplicationId applicationId, String user) {
  return LogAggregationUtils.getRemoteNodeLogFileForApp(
      remoteNodeTmpLogFileForApp, applicationId, user, NodeId.newInstance(
          hostName, 0), conf.get(
          YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
          YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX));
}
 
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 9, Source: HPCLogAggregateHandler.java

Example 8: numOfLogsAvailable

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private int numOfLogsAvailable(LogAggregationService logAggregationService,
    ApplicationId appId, boolean sizeLimited, String lastLogFile)
    throws IOException {
  Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user);
  RemoteIterator<FileStatus> nodeFiles = null;
  try {
    Path qualifiedLogDir =
        FileContext.getFileContext(this.conf).makeQualified(appLogDir);
    nodeFiles =
        FileContext.getFileContext(qualifiedLogDir.toUri(), this.conf)
          .listStatus(appLogDir);
  } catch (FileNotFoundException fnf) {
    LOG.info("Context file not vailable: " + fnf);
    return -1;
  }
  int count = 0;
  while (nodeFiles.hasNext()) {
    FileStatus status = nodeFiles.next();
    String filename = status.getPath().getName();
    if (filename.contains(LogAggregationUtils.TMP_FILE_SUFFIX)
        || (lastLogFile != null && filename.contains(lastLogFile)
            && sizeLimited)) {
      LOG.info("fileName :" + filename);
      LOG.info("lastLogFile :" + lastLogFile);
      return -1;
    }
    if (filename.contains(LogAggregationUtils
      .getNodeString(logAggregationService.getNodeId()))) {
      LOG.info("Node list filename :" + filename);
      count++;
    }
  }
  LOG.info("File Count :" + count);
  return count;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 36, Source: TestLogAggregationService.java

Example 9: getRemoteNodeTmpLogFileForApp

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
private Path getRemoteNodeTmpLogFileForApp() {
  return new Path(remoteNodeLogFileForApp.getParent(),
    (remoteNodeLogFileForApp.getName() + LogAggregationUtils.TMP_FILE_SUFFIX));
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: AppLogAggregatorImpl.java

Example 10: getRemoteNodeLogFileForApp

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
Path getRemoteNodeLogFileForApp(ApplicationId appId, String user) {
  return LogAggregationUtils.getRemoteNodeLogFileForApp(
      this.remoteRootLogDir, appId, user, this.nodeId,
      this.remoteRootLogDirSuffix);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: LogAggregationService.java

Example 11: getRemoteAppLogDir

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
Path getRemoteAppLogDir(ApplicationId appId, String user) {
  return LogAggregationUtils.getRemoteAppLogDir(this.remoteRootLogDir, appId,
      user, this.remoteRootLogDirSuffix);
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: LogAggregationService.java

Example 12: run

import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; // import the required package/class
@Override
public int run(String[] args) throws Exception {
  int exitCode = 1;

  handleOpts(args);

  FileSystem fs = null;
  Path remoteRootLogDir = new Path(conf.get(
      YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
      YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
  String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
  Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
  if (verbose) {
    LOG.info("Remote Log Dir Root: " + remoteRootLogDir);
    LOG.info("Log Suffix: " + suffix);
    LOG.info("Working Dir: " + workingDir);
  }
  try {
    fs = FileSystem.get(conf);
    if (prepareWorkingDir(fs, workingDir)) {

      checkFilesAndSeedApps(fs, remoteRootLogDir, suffix);

      filterAppsByAggregatedStatus();

      checkMaxEligible();

      if (eligibleApplications.isEmpty()) {
        LOG.info("No eligible applications to process");
        exitCode = 0;
      } else {
        StringBuilder sb =
            new StringBuilder("Will process the following applications:");
        for (AppInfo app : eligibleApplications) {
          sb.append("\n\t").append(app.getAppId());
        }
        LOG.info(sb.toString());

        File localScript = File.createTempFile("hadoop-archive-logs-", ".sh");
        generateScript(localScript, workingDir, remoteRootLogDir, suffix);

        exitCode = runDistributedShell(localScript) ? 0 : 1;
      }
    }
  } finally {
    if (fs != null) {
      // Cleanup working directory
      if (fs.exists(workingDir)) {
        fs.delete(workingDir, true);
      }
      fs.close();
    }
  }
  return exitCode;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 56, Source: HadoopArchiveLogs.java


Note: The org.apache.hadoop.yarn.logaggregation.LogAggregationUtils class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright over the source code; when distributing or using the code, please follow the license of the corresponding project. Do not reproduce this article without permission.