

Java JobSubmissionFiles.getStagingDir Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir. If you are unsure what JobSubmissionFiles.getStagingDir does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.JobSubmissionFiles.


Twelve code examples of the JobSubmissionFiles.getStagingDir method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
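
Before the examples, here is a minimal, self-contained sketch of the call pattern that nearly all of them share: resolve the staging directory from a Cluster handle and make sure it exists. This is an illustrative sketch rather than code from any project below; the class name StagingDirDemo is invented, and it assumes the Hadoop MapReduce client libraries are on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;

public class StagingDirDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the per-user job staging directory (on YARN this defaults to
    // the location configured via yarn.app.mapreduce.am.staging-dir).
    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    // Create it if it does not exist yet, as several examples below do.
    FileSystem fs = stagingDir.getFileSystem(conf);
    fs.mkdirs(stagingDir);
    System.out.println("Staging directory: " + stagingDir);
  }
}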

Example 1: testCleanup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Tests the run and execute methods of the DistCp class with a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {

    Configuration conf = getConf();

    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
        conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);
    Path source = createFile("tmp.txt");
    Path target = createFile("target.txt");

    DistCp distcp = new DistCp(conf, null);
    String[] arg = { source.toString(), target.toString() };

    distcp.run(arg);
    Assert.assertTrue(fs.exists(target));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestExternalCall.java

Example 2: testCleanupTestViaToolRunner

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Tests the main method of DistCp. The method is expected to call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {

  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {

    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();

  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }

}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestExternalCall.java

Example 3: testCleanup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0, stagingDir.getFileSystem(conf)
          .listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestIntegration.java

Example 4: testCleanup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0, stagingDir.getFileSystem(conf)
          .listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 26, Source: TestIntegration.java

Example 5: createMetaFolderPath

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Create a default working folder for the job, under the job staging directory
 *
 * @return Returns the working folder information
 * @throws Exception - Exception if any
 */
private Path createMetaFolderPath() throws Exception {
  Configuration configuration = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(configuration), configuration);
  Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
  LOG.debug("Meta folder location: {}", metaFolderPath);
  configuration.set(S3MapReduceCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
  return metaFolderPath;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 15, Source: S3MapReduceCp.java
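
The configuration entry written by createMetaFolderPath above is how later stages of the job locate the meta folder again. As a sketch of the reading side (the helper readMetaFolder is hypothetical and not part of circus-train):

// Hypothetical counterpart: recover the meta folder recorded by createMetaFolderPath().
private Path readMetaFolder(Configuration configuration) {
  String metaFolder = configuration.get(S3MapReduceCpConstants.CONF_LABEL_META_FOLDER);
  if (metaFolder == null) {
    throw new IllegalStateException("Meta folder path has not been set yet");
  }
  return new Path(metaFolder);
}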

Example 6: createMetaFolderPath

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Create a default working folder for the job, under the
 * job staging directory
 *
 * @return Returns the working folder information
 * @throws Exception - Exception if any
 */
private Path createMetaFolderPath() throws Exception {
  Configuration configuration = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(
          new Cluster(configuration), configuration);
  Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
  if (LOG.isDebugEnabled())
    LOG.debug("Meta folder location: " + metaFolderPath);
  configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());    
  return metaFolderPath;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: DistCp.java

Example 7: createMetaFolderPath

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Create a default working folder for the job, under the
 * job staging directory
 *
 * @return Returns the working folder information
 * @throws Exception - Exception if any
 */
private Path createMetaFolderPath() throws Exception {
  Configuration configuration = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(
          new Cluster(configuration), configuration);
  Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
  if (LOG.isDebugEnabled())
    LOG.debug("Meta folder location: " + metaFolderPath);
  configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());    
  return metaFolderPath;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 18, Source: DistCp.java

Example 8: setup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
private boolean setup(List<FileOperation> ops, Path log) 
throws IOException {
  final String randomId = getRandomId();
  JobClient jClient = new JobClient(jobconf);
  Path stagingArea;
  try {
    stagingArea = JobSubmissionFiles.getStagingDir(
                     jClient.getClusterHandle(), jobconf);
  } catch (InterruptedException ie){
    throw new IOException(ie);
  }
  Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
  FsPermission mapredSysPerms = 
    new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
  FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
  LOG.info(JOB_DIR_LABEL + "=" + jobdir);

  if (log == null) {
    log = new Path(jobdir, "_logs");
  }
  FileOutputFormat.setOutputPath(jobconf, log);
  LOG.info("log=" + log);

  //create operation list
  FileSystem fs = jobdir.getFileSystem(jobconf);
  Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
  jobconf.set(OP_LIST_LABEL, opList.toString());
  int opCount = 0, synCount = 0;
  try (SequenceFile.Writer opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
          FileOperation.class, SequenceFile.CompressionType.NONE)) {
    for(FileOperation op : ops) {
      FileStatus srcstat = fs.getFileStatus(op.src); 
      if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
        ++opCount;
        opWriter.append(new Text(op.src.toString()), op);
      }

      Stack<Path> pathstack = new Stack<Path>();
      for(pathstack.push(op.src); !pathstack.empty(); ) {
        for(FileStatus stat : fs.listStatus(pathstack.pop())) {
          if (stat.isDirectory()) {
            pathstack.push(stat.getPath());
          }

          if (op.isDifferent(stat)) {              
            ++opCount;
            if (++synCount > SYNC_FILE_MAX) {
              opWriter.sync();
              synCount = 0;
            }
            Path f = stat.getPath();
            opWriter.append(new Text(f.toString()), new FileOperation(f, op));
          }
        }
      }
    }
  }

  checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
  jobconf.setInt(OP_COUNT_LABEL, opCount);
  LOG.info(OP_COUNT_LABEL + "=" + opCount);
  jobconf.setNumMapTasks(getMapCount(opCount,
      new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
  return opCount != 0;    
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 66, Source: DistCh.java
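
The operation list that setup() serializes into a SequenceFile is later consumed by the job's map tasks. A rough sketch of the reading side is below; it assumes FileOperation, like most Writables, has a no-argument constructor, and the loop body is illustrative only.

// Hypothetical reader for the operation list written by setup().
SequenceFile.Reader in = new SequenceFile.Reader(fs, opList, jobconf);
try {
  Text key = new Text();
  FileOperation op = new FileOperation();
  while (in.next(key, op)) {
    // handle one (path, operation) pair
  }
} finally {
  in.close();
}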

Example 9: setup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
private boolean setup(List<FileOperation> ops, Path log) 
throws IOException {
  final String randomId = getRandomId();
  JobClient jClient = new JobClient(jobconf);
  Path stagingArea;
  try {
    stagingArea = JobSubmissionFiles.getStagingDir(
                     jClient.getClusterHandle(), jobconf);
  } catch (InterruptedException ie){
    throw new IOException(ie);
  }
  Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
  FsPermission mapredSysPerms = 
    new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
  FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
  LOG.info(JOB_DIR_LABEL + "=" + jobdir);

  if (log == null) {
    log = new Path(jobdir, "_logs");
  }
  FileOutputFormat.setOutputPath(jobconf, log);
  LOG.info("log=" + log);

  //create operation list
  FileSystem fs = jobdir.getFileSystem(jobconf);
  Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
  jobconf.set(OP_LIST_LABEL, opList.toString());
  int opCount = 0, synCount = 0;
  SequenceFile.Writer opWriter = null;
  try {
    opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
        FileOperation.class, SequenceFile.CompressionType.NONE);
    for(FileOperation op : ops) {
      FileStatus srcstat = fs.getFileStatus(op.src); 
      if (srcstat.isDirectory() && op.isDifferent(srcstat)) {
        ++opCount;
        opWriter.append(new Text(op.src.toString()), op);
      }

      Stack<Path> pathstack = new Stack<Path>();
      for(pathstack.push(op.src); !pathstack.empty(); ) {
        for(FileStatus stat : fs.listStatus(pathstack.pop())) {
          if (stat.isDirectory()) {
            pathstack.push(stat.getPath());
          }

          if (op.isDifferent(stat)) {              
            ++opCount;
            if (++synCount > SYNC_FILE_MAX) {
              opWriter.sync();
              synCount = 0;
            }
            Path f = stat.getPath();
            opWriter.append(new Text(f.toString()), new FileOperation(f, op));
          }
        }
      }
    }
  } finally {
    if (opWriter != null) {
      opWriter.close();
    }
  }

  checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
  jobconf.setInt(OP_COUNT_LABEL, opCount);
  LOG.info(OP_COUNT_LABEL + "=" + opCount);
  jobconf.setNumMapTasks(getMapCount(opCount,
      new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
  return opCount != 0;    
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 70, Source: DistCh.java

Example 10: setup

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
private boolean setup(List<FileOperation> ops, Path log) throws IOException {
  final String randomId = getRandomId();
  JobClient jClient = new JobClient(jobconf);
  Path stagingArea;
  try {
    stagingArea = JobSubmissionFiles.getStagingDir(
                     jClient, jobconf);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
  Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
  FsPermission mapredSysPerms =
    new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
  FileSystem.mkdirs(jClient.getFs(), jobdir, mapredSysPerms);
  LOG.info(JOB_DIR_LABEL + "=" + jobdir);

  if (log == null) {
    log = new Path(jobdir, "_logs");
  }
  FileOutputFormat.setOutputPath(jobconf, log);
  LOG.info("log=" + log);

  //create operation list
  FileSystem fs = jobdir.getFileSystem(jobconf);
  Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
  jobconf.set(OP_LIST_LABEL, opList.toString());
  int opCount = 0, synCount = 0;
  SequenceFile.Writer opWriter = null;
  try {
    opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
        FileOperation.class, SequenceFile.CompressionType.NONE);
    for(FileOperation op : ops) {
      FileStatus srcstat = fs.getFileStatus(op.src); 
      if (srcstat.isDir() && op.isDifferent(srcstat)) {
        ++opCount;
        opWriter.append(new Text(op.src.toString()), op);
      }

      Stack<Path> pathstack = new Stack<Path>();
      for(pathstack.push(op.src); !pathstack.empty(); ) {
        for(FileStatus stat : fs.listStatus(pathstack.pop())) {
          if (stat.isDir()) {
            pathstack.push(stat.getPath());
          }

          if (op.isDifferent(stat)) {              
            ++opCount;
            if (++synCount > SYNC_FILE_MAX) {
              opWriter.sync();
              synCount = 0;
            }
            Path f = stat.getPath();
            opWriter.append(new Text(f.toString()), new FileOperation(f, op));
          }
        }
      }
    }
  } finally {
    if (opWriter != null) {
      opWriter.close();
    }
  }

  checkDuplication(fs, opList, new Path(jobdir, "_sorted"), jobconf);
  jobconf.setInt(OP_COUNT_LABEL, opCount);
  LOG.info(OP_COUNT_LABEL + "=" + opCount);
  jobconf.setNumMapTasks(getMapCount(opCount,
      new JobClient(jobconf).getClusterStatus().getTaskTrackers()));
  return opCount != 0;    
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 69, Source: DistCh.java

Example 11: getStagingDir

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Initializes the staging directory and returns the path.
 *
 * @param conf system configuration
 * @return staging directory path
 * @throws IOException
 * @throws InterruptedException
 */
public static Path getStagingDir(Configuration conf)
    throws IOException, InterruptedException {
  return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: JobUtil.java

Example 12: getStagingDir

import org.apache.hadoop.mapreduce.JobSubmissionFiles; // import the package/class that the method depends on
/**
 * Initializes the staging directory and returns the path.
 *
 * @param conf system configuration
 * @return staging directory path
 * @throws IOException
 * @throws InterruptedException
 */
public static Path getStagingDir(Configuration conf)
    throws IOException, InterruptedException {
  JobClient jobClient = new JobClient(new JobConf(conf));
  return JobSubmissionFiles.getStagingDir(jobClient, conf);
}
 
Developer: tenggyut, Project: HIndex, Lines: 14, Source: JobUtil.java
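
Both JobUtil variants wrap the same underlying lookup; example 11 goes through the newer Cluster API, while example 12 uses the older JobClient API. A minimal caller might look like the following sketch (the scratch-directory naming is invented for illustration):

Configuration conf = new Configuration();
Path stagingDir = JobUtil.getStagingDir(conf);
// Place per-job scratch data under the resolved staging directory.
Path scratch = new Path(stagingDir, "myjob_" + System.currentTimeMillis());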


注:本文中的org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。