

Java JobConf.set Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.set. If you are unsure what JobConf.set does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples of org.apache.hadoop.mapred.JobConf, the class this method belongs to.


The following presents 15 code examples of the JobConf.set method, sorted by popularity by default. Examples rated useful by readers are ranked higher, which helps surface better Java code samples.
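Before looking at the project-specific examples, here is a minimal, self-contained sketch of the basic set/get pattern on a JobConf. The property name my.example.key is made up purely for illustration; mapreduce.job.name is a standard Hadoop property.

import org.apache.hadoop.mapred.JobConf;

public class JobConfSetSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // set(key, value) stores a string-valued property in the job configuration
    conf.set("my.example.key", "my-value");        // hypothetical property name, for illustration only
    conf.set("mapreduce.job.name", "example-job"); // a standard Hadoop property

    // values are read back with get(); the two-argument form supplies a default
    System.out.println(conf.get("my.example.key"));          // prints "my-value"
    System.out.println(conf.get("missing.key", "fallback")); // prints "fallback"
  }
}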

Example 1: testRetrieveDatasets

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
@Test
public void testRetrieveDatasets() throws IOException {
  JobConf conf = new JobConf();
  conf.set(DBConfiguration.URL_PROPERTY, "localhost:12345");
  conf.set(DBConfiguration.USERNAME_PROPERTY, "user");
  conf.set(DBConfiguration.PASSWORD_PROPERTY, "pssword");
  // set the password in the secure credentials object
  Text PASSWORD_SECRET_KEY = new Text(DBConfiguration.PASSWORD_PROPERTY);
  conf.getCredentials().addSecretKey(PASSWORD_SECRET_KEY,
      "pssword".getBytes());

  String dsName = "dsName1";
  conf.set(MainframeConfiguration.MAINFRAME_INPUT_DATASET_NAME, dsName);
  Job job = new Job(conf);
  ConfigurationHelper.setJobNumMaps(job, 2);
  //format.getSplits(job);

  List<InputSplit> splits = new ArrayList<InputSplit>();
  splits = ((MainframeDatasetInputFormat<SqoopRecord>) format).getSplits(job);
  Assert.assertEquals("test1", ((MainframeDatasetInputSplit) splits.get(0))
      .getNextDataset().toString());
  Assert.assertEquals("test2", ((MainframeDatasetInputSplit) splits.get(1))
      .getNextDataset().toString());
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: TestMainframeDatasetInputFormat.java

Example 2: initCredentials

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 24, Source: TableMapReduceUtil.java

Example 3: testEmptyJoin

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Author: naver, Project: hadoop, Lines: 18, Source: TestDatamerge.java

Example 4: updateConfiguration

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
/**
 * Updates configuration based on what's given on the command line.
 *
 * @param conf
 *          The configuration object
 * @param keyvalues
 *          An array of interleaved key value pairs.
 */
private void updateConfiguration(JobConf conf, String[] keyvalues) {
  int num_confs_updated = 0;
  if (keyvalues != null) {
    for (String prop : keyvalues) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1]);
        num_confs_updated++;
      } else {
        LOG.warn("Ignoring -D option " + prop);
      }
    }
  }
  LOG.info("Updated " + num_confs_updated
      + " configuration settings from command line.");
}
 
Author: naver, Project: hadoop, Lines: 25, Source: MiniHadoopClusterManager.java

Example 5: setUp

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
 
Author: naver, Project: hadoop, Lines: 37, Source: TestNonExistentJob.java

Example 6: init

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
      GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);

  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {

    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
 
Author: naver, Project: hadoop, Lines: 28, Source: TestHSAdminServer.java

Example 7: assertGetSplits

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
  /**
   * Assert that the splits are created as expected from the specified number of blocks.
   *   First puts the required key-values in a region and then queries the
   *   required splits.
   *
   * @param mtb the test helper for monarch interactions
   * @param regionName the region name
   * @param map the map containing prefix and block-count with expected length for splits
   * @param splitSize the split size
   * @throws RMIException
   */
  private void assertGetSplits(final MonarchDUnitBase mtb, final String regionName,
                               final Map<String, Long[]> map, final long splitSize) throws RMIException {
    int expectedSplitCount = 0;
    Long[] value;
    /** MonarchSplit.getSplits looks for the data in region with suffix..
     *   so need to put data in region with correct name **/
    for (Map.Entry<String, Long[]> e : map.entrySet()) {
      value = e.getValue();
      mtb.putInRegionOnServer(regionName + MonarchUtils.META_TABLE_SFX, e.getKey() + MonarchUtils.KEY_BLOCKS_SFX, value[0]);
      mtb.assertOnClient(regionName + MonarchUtils.META_TABLE_SFX, e.getKey() + MonarchUtils.KEY_BLOCKS_SFX, value[0]);
      expectedSplitCount += (value.length - 1);
    }

    /** setup the job-configuration and get the required splits **/
    JobConf jobConf = new JobConf();
    jobConf.set(MonarchUtils.REGION, regionName);
    jobConf.set(MonarchUtils.LOCATOR_PORT, mtb.getLocatorPort());
    jobConf.set(MonarchUtils.SPLIT_SIZE_KEY, String.valueOf(splitSize));
    jobConf.set("mapred.input.dir", "dummy");

    MonarchSplit[] sps = (MonarchSplit[])MonarchSplit.getSplits(jobConf, 1, 0);
    assertEquals(sps.length, expectedSplitCount);

//    System.out.println("MonarchSplits = " + Arrays.asList(sps));

    /** assert on length of individual splits **/
    Map<String, Integer> expectedSplitLengthIndexMap = new HashMap<>(5);
    Integer posInSamePrefix;
    long expectedLength;
    for (final MonarchSplit sp : sps) {
      posInSamePrefix = expectedSplitLengthIndexMap.get(sp.getKeyPrefix());
      if (posInSamePrefix == null) {
        posInSamePrefix = 1;
      }
      expectedLength = map.get(sp.getKeyPrefix())[posInSamePrefix++];
      assertEquals(sp.getLength(), expectedLength);
      expectedSplitLengthIndexMap.put(sp.getKeyPrefix(), posInSamePrefix);
    }
  }
 
Author: ampool, Project: monarch, Lines: 51, Source: MonarchSplitTest.java

Example 8: getSplits

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
/**
 * Get input splits for the specified split-size.
 *
 * @param regionName the region name
 * @param splitSize  the split-size
 * @return an array of splits to be read
 */
private InputSplit[] getSplits(final String regionName, final int splitSize) throws IOException{
  JobConf jobConf = new JobConf();
  jobConf.set(MonarchUtils.REGION, regionName);
  jobConf.set("mapred.input.dir", "/home/mgalande");
  jobConf.set(MonarchUtils.SPLIT_SIZE_KEY, String.valueOf(splitSize));
  jobConf.set(MonarchUtils.MONARCH_TABLE_TYPE, "unordered");
  return MonarchSplit.getSplits(jobConf, 1);
}
 
Author: ampool, Project: monarch, Lines: 16, Source: MonarchRecordReaderTest.java

Example 9: testMultiConfigure

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
@Test
public void testMultiConfigure() {
  KeyFieldBasedPartitioner<Text, Text> kfbp =
    new KeyFieldBasedPartitioner<Text, Text>();
  JobConf conf = new JobConf();
  conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,1");
  kfbp.setConf(conf);
  Text key = new Text("foo\tbar");
  Text val = new Text("val");
  int partNum = kfbp.getPartition(key, val, 4096);
  kfbp.configure(conf);
  assertEquals(partNum, kfbp.getPartition(key,val, 4096));
}
 
Author: naver, Project: hadoop, Lines: 14, Source: TestKeyFieldBasedPartitioner.java

Example 10: testMRAppMasterMidLock

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
@Test
public void testMRAppMasterMidLock() throws IOException,
    InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId applicationAttemptId = ConverterUtils
      .toApplicationAttemptId(applicationAttemptIdStr);
  JobId jobId =  TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
  FileSystem fs = FileSystem.get(conf);
  //Create the file, but no end file so we should unregister with an error.
  fs.create(start).close();
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  MRAppMaster appMaster =
      new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
          System.currentTimeMillis(), false, false);
  boolean caught = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  } catch (IOException e) {
    //The IO Exception is expected
    LOG.info("Caught expected Exception", e);
    caught = true;
  }
  assertTrue(caught);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
  appMaster.stop();

  // verify the final status is FAILED
  verifyFailedStatus((MRAppMasterTest)appMaster, "FAILED");
}
 
Author: naver, Project: hadoop, Lines: 37, Source: TestMRAppMaster.java

Example 11: testNotificationOnLastRetryUnregistrationFailure

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
@Test
public void testNotificationOnLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer2 server = startHttpServer();
  MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
      this.getClass().getName(), true, 2, false));
  // Currently, isLastRetry is always false at the beginning of MRAppMaster,
  // unless the staging area exists or a commit has already started.
  // Now manually set isLastRetry to true; it should be reset to false when
  // unregistration fails.
  app.isLastAMRetry = true;
  doNothing().when(app).sysexit();
  JobConf conf = new JobConf();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  app.getContext().getEventHandler()
    .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(job, JobStateInternal.REBOOT);
  // Now shut down. The user should see the FAILED state.
  // Unregistration fails, so isLastAMRetry is recalculated.
  // The reboot stops the service internally, so we don't need to shut down twice.
  app.waitForServiceToStop(10000);
  Assert.assertFalse(app.isLastAMRetry());
  // Since it's not the last retry, JobEndServlet was not called
  Assert.assertEquals(0, JobEndServlet.calledTimes);
  Assert.assertNull(JobEndServlet.requestUri);
  Assert.assertNull(JobEndServlet.foundJobState);
  server.stop();
}
 
Author: naver, Project: hadoop, Lines: 33, Source: TestJobEndNotifier.java

Example 12: copy

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
/**
 * Driver to copy srcPath to destPath depending on required protocol.
 * @param conf configuration
 * @param args arguments
 */
static void copy(final Configuration conf, final Arguments args
    ) throws IOException {
  LOG.info("srcPaths=" + args.srcs);
  if (!args.dryrun || args.flags.contains(Options.UPDATE)) {
    LOG.info("destPath=" + args.dst);
  }

  JobConf job = createJobConf(conf);
  
  checkSrcPath(job, args.srcs);
  if (args.preservedAttributes != null) {
    job.set(PRESERVE_STATUS_LABEL, args.preservedAttributes);
  }
  if (args.mapredSslConf != null) {
    job.set("dfs.https.client.keystore.resource", args.mapredSslConf);
  }
  
  //Initialize the mapper
  try {
    if (setup(conf, job, args)) {
      JobClient.runJob(job);
    }
    if(!args.dryrun) {
      finalize(conf, job, args.dst, args.preservedAttributes);
    }
  } finally {
    if (!args.dryrun) {
      //delete tmp
      fullyDelete(job.get(TMP_DIR_LABEL), job);
    }
    //delete jobDirectory
    fullyDelete(job.get(JOB_DIR_LABEL), job);
  }
}
 
Author: naver, Project: hadoop, Lines: 40, Source: DistCpV1.java

Example 13: addInputPath

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
/**
 * Add a {@link Path} with a custom {@link InputFormat} to the list of
 * inputs for the map-reduce job.
 * 
 * @param conf The configuration of the job
 * @param path {@link Path} to be added to the list of inputs for the job
 * @param inputFormatClass {@link InputFormat} class to use for this path
 */
public static void addInputPath(JobConf conf, Path path,
    Class<? extends InputFormat> inputFormatClass) {

  String inputFormatMapping = path.toString() + ";"
     + inputFormatClass.getName();
  String inputFormats = conf.get("mapreduce.input.multipleinputs.dir.formats");
  conf.set("mapreduce.input.multipleinputs.dir.formats",
     inputFormats == null ? inputFormatMapping : inputFormats + ","
         + inputFormatMapping);

  conf.setInputFormat(DelegatingInputFormat.class);
}
 
Author: naver, Project: hadoop, Lines: 21, Source: MultipleInputs.java
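As the javadoc in the snippet above explains, each call to addInputPath appends a path;formatClass mapping to the mapreduce.input.multipleinputs.dir.formats property. The following is a minimal usage sketch, assuming the standard org.apache.hadoop.mapred.lib.MultipleInputs API and the property name used in the snippet above; the paths are illustrative only.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.MultipleInputs;

public class MultipleInputsSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // each call appends a "path;formatClass" entry to the mapping property
    MultipleInputs.addInputPath(conf, new Path("/data/text"), TextInputFormat.class);
    MultipleInputs.addInputPath(conf, new Path("/data/seq"), SequenceFileInputFormat.class);
    // prints the accumulated mapping, e.g. (illustrative):
    // /data/text;org.apache.hadoop.mapred.TextInputFormat,/data/seq;org.apache.hadoop.mapred.SequenceFileInputFormat
    System.out.println(conf.get("mapreduce.input.multipleinputs.dir.formats"));
  }
}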

Example 14: testRunner

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
/**
 * test PipesMapRunner    test the transfer data from reader
 *
 * @throws Exception
 */
@Test
public void testRunner() throws Exception {

  // clean old password files
  File[] psw = cleanTokenPasswordFile();
  try {
    RecordReader<FloatWritable, NullWritable> rReader = new ReaderPipesMapRunner();
    JobConf conf = new JobConf();
    conf.set(Submitter.IS_JAVA_RR, "true");
    // for stdout and stderr

    conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);

    CombineOutputCollector<IntWritable, Text> output = new CombineOutputCollector<IntWritable, Text>(
            new Counters.Counter(), new Progress());
    FileSystem fs = new RawLocalFileSystem();
    fs.setConf(conf);
    Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(
            new Path(workSpace + File.separator + "outfile")), IntWritable.class,
            Text.class, null, null, true);
    output.setWriter(wr);
    // stub for client
    File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");

    conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
    // token for authorization
    Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>(
            "user".getBytes(), "password".getBytes(), new Text("kind"), new Text(
            "service"));
    TokenCache.setJobToken(token,  conf.getCredentials());
    conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
    TestTaskReporter reporter = new TestTaskReporter();
    PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text> runner = new PipesMapRunner<FloatWritable, NullWritable, IntWritable, Text>();

    initStdOut(conf);

    runner.configure(conf);
    runner.run(rReader, output, reporter);

    String stdOut = readStdOut(conf);

    // test part of the translated data; the client's stdOut serves as a
    // common file shared by the client and the test
    // check version
    assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
    // check key and value classes
    assertTrue(stdOut
            .contains("Key class:org.apache.hadoop.io.FloatWritable"));
    assertTrue(stdOut
            .contains("Value class:org.apache.hadoop.io.NullWritable"));
    // verify that all data from the reader has been sent
    assertTrue(stdOut.contains("value:0.0"));
    assertTrue(stdOut.contains("value:9.0"));

  } finally {
    if (psw != null) {
      // remove password files
      for (File file : psw) {
        file.deleteOnExit();
      }
    }

  }
}
 
Author: naver, Project: hadoop, Lines: 70, Source: TestPipeApplication.java

Example 15: runParseTest

import org.apache.hadoop.mapred.JobConf; // import the package/class that the method depends on
public void runParseTest(String fieldTerminator, String lineTerminator,
    String encloser, String escape, boolean encloseRequired)
    throws IOException {

  ClassLoader prevClassLoader = null;

  String [] argv = getArgv(true, fieldTerminator, lineTerminator,
      encloser, escape, encloseRequired);
  runImport(argv);
  try {
    String tableClassName = getTableName();

    argv = getArgv(false, fieldTerminator, lineTerminator, encloser, escape,
        encloseRequired);
    SqoopOptions opts = new ImportTool().parseArguments(argv, null, null,
        true);

    CompilationManager compileMgr = new CompilationManager(opts);
    String jarFileName = compileMgr.getJarFilename();

    // Make sure the user's class is loaded into our address space.
    prevClassLoader = ClassLoaderStack.addJarFile(jarFileName,
        tableClassName);

    JobConf job = new JobConf();
    job.setJar(jarFileName);

    // Tell the job what class we're testing.
    job.set(ReparseMapper.USER_TYPE_NAME_KEY, tableClassName);

    // use local mode in the same JVM.
    ConfigurationHelper.setJobtrackerAddr(job, "local");
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
      job.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    String warehouseDir = getWarehouseDir();
    Path warehousePath = new Path(warehouseDir);
    Path inputPath = new Path(warehousePath, getTableName());
    Path outputPath = new Path(warehousePath, getTableName() + "-out");

    job.setMapperClass(ReparseMapper.class);
    job.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    JobClient.runJob(job);
  } catch (InvalidOptionsException ioe) {
    fail(ioe.toString());
  } catch (ParseException pe) {
    fail(pe.toString());
  } finally {
    if (null != prevClassLoader) {
      ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
    }
  }
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 60, Source: TestParseMethods.java


Note: The org.apache.hadoop.mapred.JobConf.set examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.