

Java Cluster Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.Cluster. If you are wondering what exactly the Cluster class does, how to use it, or what real-world usage looks like, the curated examples below may help.


The Cluster class belongs to the org.apache.hadoop.mapreduce package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
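
Before the examples, here is a minimal, self-contained sketch of the typical Cluster workflow: connect to the cluster described by a Configuration, look up a job by its ID, and read its status. The class name and the job ID string below are placeholders chosen for illustration; everything else uses only the standard org.apache.hadoop.mapreduce API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class ClusterQuickStart {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Connect to the cluster described by the default Configuration
    // (fs.defaultFS, YARN addresses, etc. loaded from the classpath).
    Configuration conf = new Configuration();
    Cluster cluster = new Cluster(conf);
    try {
      // "job_1234567890123_0001" is a placeholder job ID.
      Job job = cluster.getJob(JobID.forName("job_1234567890123_0001"));
      if (job == null) {
        System.out.println("No such job on this cluster");
        return;
      }
      System.out.println("Job state: " + job.getStatus().getState());
    } finally {
      cluster.close(); // release the client connection to the cluster
    }
  }
}

The same pattern recurs throughout the examples below: construct a Cluster from a Configuration, call cluster.getJob(...), and then kill the job, fetch task reports, or check queue ACLs.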

Example 1: cancel

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw an exception instead?
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }

    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
Developer ID: apache, Project: hbase, Lines: 22, Source: MapReduceBackupCopyJob.java

Example 2: testListAttemptIdsWithInvalidInputs

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });

  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  int retCode_invalidTaskState = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "REDUCE", "complete" });

  assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);
  assertEquals("complete is an invalid input,exit code should be -1", -1,
      retCode_invalidTaskState);

}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: TestCLI.java

Example 3: testCleanup

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {

  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
      conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestExternalCall.java

Example 4: testCleanupTestViaToolRunner

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {

  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {

    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();

  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(t.status, 0);
    Assert.assertEquals(
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length, 0);
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: TestExternalCall.java

Example 5: testCleanup

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(stagingDir.getFileSystem(conf).
          listStatus(stagingDir).length, 0);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: TestIntegration.java

Example 6: testGetJob

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test
public void testGetJob() throws Exception {
  Configuration conf = new Configuration();
  long sleepTime = 100;
  conf.setLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL, sleepTime);
  Cluster mockCluster = mock(Cluster.class);
  JobID jobId1 = JobID.forName("job_1234654654_001");
  when(mockCluster.getJob(jobId1)).thenReturn(null);

  for (int i = 0; i < 2; ++i) {
    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, i);
    CLI cli = spy(new CLI(conf));
    cli.cluster = mockCluster;
    doReturn(mockCluster).when(cli).createCluster();
    long start = Time.monotonicNow();
    cli.getJob(jobId1);
    long end = Time.monotonicNow();
    Assert.assertTrue(end - start > (i * sleepTime));
    Assert.assertTrue(end - start < ((i + 1) * sleepTime));
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 22, Source: TestCLI.java

Example 7: testListAttemptIdsWithInvalidInputs

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });
  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  assertEquals("JOB_SETUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);

}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 22, Source: TestCLI.java

Example 8: testCleanup

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(stagingDir.getFileSystem(conf).
          listStatus(stagingDir).length, 0);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 26, Source: TestIntegration.java

Example 9: getTaskReports

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
 
Developer ID: sigmoidanalytics, Project: spork, Lines: 18, Source: HadoopShims.java

Example 10: testSubmitWhenUserHasNoPermissionsToSubmitJobInQueueShouldRaiseYarnQueueAclsException

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test(expected = YarnQueueAclsException.class)
public void testSubmitWhenUserHasNoPermissionsToSubmitJobInQueueShouldRaiseYarnQueueAclsException() throws IOException, InterruptedException, ClassNotFoundException {
  Mockito.spy( YarnQueueAclsVerifier.class );
  ConfigurationProxyV2 configurationProxyV2 = Mockito.mock( ConfigurationProxyV2.class );
  Cluster cluster = Mockito.mock( Cluster.class );
  Job job = Mockito.mock( Job.class );

  Mockito.when( configurationProxyV2.getJob() ).thenReturn( job );
  Mockito.when( configurationProxyV2.createClusterDescription( Mockito.any( Configuration.class ) ) ).thenReturn( cluster );
  Mockito.when( configurationProxyV2.submit() ).thenCallRealMethod();
  Mockito.when( cluster.getQueueAclsForCurrentUser() ).thenReturn( new QueueAclsInfo[]{
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{
      "ANOTHER_RIGHTS"
    } ),
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{})
  });

  configurationProxyV2.submit();
}
 
Developer ID: pentaho, Project: pentaho-hadoop-shims, Lines: 20, Source: ConfigurationProxyV2Test.java

Example 11: testSubmitWhenUserHasPermissionsToSubmitJobInQueueShouldExecuteSuccessfully

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
@Test
public void testSubmitWhenUserHasPermissionsToSubmitJobInQueueShouldExecuteSuccessfully() throws IOException, InterruptedException, ClassNotFoundException {
  Mockito.spy( YarnQueueAclsVerifier.class );
  ConfigurationProxyV2 configurationProxyV2 = Mockito.mock( ConfigurationProxyV2.class );
  Cluster cluster = Mockito.mock( Cluster.class );
  Job job = Mockito.mock( Job.class );

  Mockito.when( configurationProxyV2.getJob() ).thenReturn( job );
  Mockito.when( configurationProxyV2.createClusterDescription( Mockito.any( Configuration.class ) ) ).thenReturn( cluster );
  Mockito.when( configurationProxyV2.submit() ).thenCallRealMethod();
  Mockito.when( cluster.getQueueAclsForCurrentUser() ).thenReturn( new QueueAclsInfo[]{
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{
      "SUBMIT_APPLICATIONS"
    } ),
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{})
  });

  Assert.assertNotNull( configurationProxyV2.submit() );
}
 
Developer ID: pentaho, Project: pentaho-hadoop-shims, Lines: 20, Source: ConfigurationProxyV2Test.java

Example 12: run

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraSum");
  job.setJarByClass(TeraChecksum.class);
  job.setMapperClass(ChecksumMapper.class);
  job.setReducerClass(ChecksumReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Unsigned16.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 20, Source: TeraChecksum.java

Example 13: run

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
/**
 * @param args the cli arguments
 */
public int run(String[] args) 
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  setNumberOfRows(job, parseHumanLong(args[0]));
  Path outputDir = new Path(args[1]);
  if (outputDir.getFileSystem(getConf()).exists(outputDir)) {
    throw new IOException("Output directory " + outputDir + 
                          " already exists.");
  }
  FileOutputFormat.setOutputPath(job, outputDir);
  job.setJobName("TeraGen");
  job.setJarByClass(TeraGen.class);
  job.setMapperClass(SortGenMapper.class);
  job.setNumReduceTasks(0);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setInputFormatClass(RangeInputFormat.class);
  job.setOutputFormatClass(TeraOutputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 28, Source: TeraGen.java

Example 14: run

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 1;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraValidate");
  job.setJarByClass(TeraValidate.class);
  job.setMapperClass(ValidateMapper.class);
  job.setReducerClass(ValidateReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  // force a single split 
  FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 22, Source: TeraValidate.java

Example 15: checkAccessToKill

import org.apache.hadoop.mapreduce.Cluster; // import the dependent package/class
private void checkAccessToKill(JobTracker tracker, final JobConf mrConf, 
    UserGroupInformation killer) throws IOException, InterruptedException,
    ClassNotFoundException {
  Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1",
      "p1" + NAME_SEPARATOR + "p11", conf);
  JobID jobID = job.getStatus().getJobID();
  // Ensure that the JobInProgress is initialized before we issue a kill
  // signal to the job.
  JobInProgress jip =  tracker.getJob(
      org.apache.hadoop.mapred.JobID.downgrade(jobID));
  tracker.initJob(jip);
  Cluster cluster = killer.doAs(new PrivilegedExceptionAction<Cluster>() {
    public Cluster run() throws IOException {
      return new Cluster(mrConf);
    }
  });
  cluster.getJob(jobID).killJob();
  assertEquals("job not killed by " + killer,
      cluster.getJob(jobID).getStatus().getState(), (State.KILLED));
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 21, Source: TestQueueManagerWithJobTracker.java


Note: The org.apache.hadoop.mapreduce.Cluster class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.