

Java SequenceFileInputFormat.addInputPath Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat.addInputPath. If you are wondering what SequenceFileInputFormat.addInputPath does, how to use it, or what real-world calls look like, the curated code examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat.


The following presents 6 code examples of the SequenceFileInputFormat.addInputPath method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
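Before the collected examples, here is a minimal sketch of the basic pattern they all share: SequenceFileInputFormat.addInputPath registers a SequenceFile input path with a Job (it can be called repeatedly to add several paths), and setInputFormatClass tells the framework to decode each file into key/value records. This is an illustrative sketch, not code from any of the projects below; the class name SequenceFileReadSketch, the paths /data/in and /data/out, and the assumption that the files hold LongWritable/Text pairs are all hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class SequenceFileReadSketch {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf, "read-sequencefile");
		job.setJarByClass(SequenceFileReadSketch.class);

		// Register a SequenceFile input directory; addInputPath may be
		// called again to add further paths to the same job.
		SequenceFileInputFormat.addInputPath(job, new Path("/data/in"));
		job.setInputFormatClass(SequenceFileInputFormat.class);

		// Identity mapper, map-only job: each record's key/value is
		// passed straight through to the output.
		job.setMapperClass(Mapper.class);
		job.setNumReduceTasks(0);

		// Assumes the SequenceFiles were written as LongWritable/Text pairs.
		job.setOutputFormatClass(TextOutputFormat.class);
		job.setOutputKeyClass(LongWritable.class);
		job.setOutputValueClass(Text.class);
		TextOutputFormat.setOutputPath(job, new Path("/data/out"));

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}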

Example 1: initSumMRJob

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
public static void initSumMRJob(Job job, String inputPath, String outtable, String auths) throws AccumuloSecurityException, IOException {
  Configuration conf = job.getConfiguration();
  String username = conf.get(USERNAME);
  String password = conf.get(PASSWORD);
  String instance = conf.get(INSTANCE);
  String zookeepers = conf.get(ZOOKEEPERS);

  if (zookeepers != null) {
    AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password));
    AccumuloOutputFormat.setZooKeeperInstance(job, instance, zookeepers);
  } else {
    throw new IllegalArgumentException("Must specify zookeepers");
  }

  SequenceFileInputFormat.addInputPath(job, new Path(inputPath));
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setMapOutputKeyClass(TripleEntry.class);
  job.setMapOutputValueClass(CardList.class);

  AccumuloOutputFormat.setDefaultTableName(job, outtable);
  job.setOutputFormatClass(AccumuloOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 26, Source: JoinSelectStatsUtil.java

Example 2: total

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
public static void total(String name, String in, String out)
		throws IOException, InterruptedException, ClassNotFoundException {
	Configuration conf = new Configuration();
	conf.set(QUERIED_NAME, name);
	Job job = Job.getInstance(new Cluster(conf), conf);
	job.setJarByClass(Total.class);

	// in
	if (!in.endsWith("/"))
		in = in.concat("/");
	in = in.concat("employees");
	SequenceFileInputFormat.addInputPath(job, new Path(in));
	job.setInputFormatClass(SequenceFileInputFormat.class);

	// map
	job.setMapperClass(TotalMapper.class);
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(DoubleWritable.class);

	// reduce
	job.setCombinerClass(TotalReducer.class);
	job.setReducerClass(TotalReducer.class);

	// out
	SequenceFileOutputFormat.setOutputPath(job, new Path(out));
	job.setOutputFormatClass(SequenceFileOutputFormat.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(DoubleWritable.class);

	job.waitForCompletion(true);
}
 
Developer ID: amritbhat786, Project: DocIT, Lines of code: 32, Source: Total.java

Example 3: createJob

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
public static Job createJob(String name, String base) throws IOException {
	Configuration conf = new Configuration();
	conf.set(Total.QUERIED_NAME, name);
	Job job = Job.getInstance(new Cluster(conf), conf);
	job.setJarByClass(Cut.class);

	// in
	String in = base;
	if (!base.endsWith("/"))
		in = in.concat("/");
	in = in.concat("employees");
	SequenceFileInputFormat.addInputPath(job, new Path(in));
	job.setInputFormatClass(SequenceFileInputFormat.class);

	// map
	job.setMapperClass(CutMapper.class);
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(Employee.class);

	// out
	SequenceFileOutputFormat.setOutputPath(job, new Path(base + "/tmp"));
	job.setOutputFormatClass(SequenceFileOutputFormat.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Employee.class);

	return job;
}
 
Developer ID: amritbhat786, Project: DocIT, Lines of code: 28, Source: Cut.java

Example 4: runCopyJob

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
/**
 * Run Map-Reduce Job to perform the files copy.
 */
private boolean runCopyJob(final Path inputRoot, final Path outputRoot,
    final List<Pair<Path, Long>> snapshotFiles, final boolean verifyChecksum,
    final String filesUser, final String filesGroup, final int filesMode,
    final int mappers) throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
  if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
  conf.setInt(CONF_FILES_MODE, filesMode);
  conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
  conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
  conf.set(CONF_INPUT_ROOT, inputRoot.toString());
  conf.setInt("mapreduce.job.maps", mappers);

  // job.setMapSpeculativeExecution(false)
  conf.setBoolean("mapreduce.map.speculative", false);
  conf.setBoolean("mapreduce.reduce.speculative", false);
  conf.setBoolean("mapred.map.tasks.speculative.execution", false);
  conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);

  Job job = new Job(conf);
  job.setJobName("ExportSnapshot");
  job.setJarByClass(ExportSnapshot.class);
  job.setMapperClass(ExportMapper.class);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  for (Path path: createInputFiles(conf, snapshotFiles, mappers)) {
    LOG.debug("Add Input Path=" + path);
    SequenceFileInputFormat.addInputPath(job, path);
  }

  return job.waitForCompletion(true);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 37, Source: ExportSnapshot.java

Example 5: runCopyJob

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
/**
 * Run Map-Reduce Job to perform the files copy.
 */
private void runCopyJob(final Path inputRoot, final Path outputRoot,
    final List<Pair<Path, Long>> snapshotFiles, final boolean verifyChecksum,
    final String filesUser, final String filesGroup, final int filesMode,
    final int mappers, final int bandwidthMB)
        throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
  if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
  conf.setInt(CONF_FILES_MODE, filesMode);
  conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
  conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
  conf.set(CONF_INPUT_ROOT, inputRoot.toString());
  conf.setInt("mapreduce.job.maps", mappers);
  conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB);

  Job job = new Job(conf);
  job.setJobName("ExportSnapshot");
  job.setJarByClass(ExportSnapshot.class);
  TableMapReduceUtil.addDependencyJars(job);
  job.setMapperClass(ExportMapper.class);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setMapSpeculativeExecution(false);
  job.setNumReduceTasks(0);

  // Create MR Input
  Path inputFolderPath = getInputFolderPath(conf);
  for (Path path: createInputFiles(conf, inputFolderPath, snapshotFiles, mappers)) {
    LOG.debug("Add Input Path=" + path);
    SequenceFileInputFormat.addInputPath(job, path);
  }

  try {
    // Acquire the delegation Tokens
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { inputRoot, outputRoot }, conf);

    // Run the MR Job
    if (!job.waitForCompletion(true)) {
      // TODO: Replace the fixed string with job.getStatus().getFailureInfo()
      // when it will be available on all the supported versions.
      throw new ExportSnapshotException("Copy Files Map-Reduce Job failed");
    }
  } finally {
    // Remove MR Input
    try {
      inputFolderPath.getFileSystem(conf).delete(inputFolderPath, true);
    } catch (IOException e) {
      LOG.warn("Unable to remove MR input folder: " + inputFolderPath, e);
    }
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 56, Source: ExportSnapshot.java

Example 6: initSumMRJob

import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; // import the package/class this method depends on
public static void initSumMRJob(Job job, String inputPath, String outtable) throws AccumuloSecurityException, IOException {

    Configuration conf = job.getConfiguration();

    String username = conf.get(USERNAME);
    String password = conf.get(PASSWORD);
    String instance = conf.get(INSTANCE);

    AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password));
    AccumuloOutputFormat.setMockInstance(job, instance);
    AccumuloOutputFormat.setDefaultTableName(job, outtable);

    SequenceFileInputFormat.addInputPath(job, new Path(tempDir.getAbsolutePath()));
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setMapOutputKeyClass(TripleEntry.class);
    job.setMapOutputValueClass(CardList.class);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 28, Source: JoinSelectStatisticsTest.java


Note: The org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat.addInputPath method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.