This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.setLong. If you are unsure what JobConf.setLong does, how to call it, or where to find real-world samples, the curated code examples below should help. You can also explore further examples of the enclosing class, org.apache.hadoop.mapred.JobConf.
Four code examples of JobConf.setLong are shown below, ordered by popularity.
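Before diving into the examples: JobConf inherits setLong(String, long) and its counterpart getLong(String, long) from org.apache.hadoop.conf.Configuration. A minimal round-trip sketch (the key "demo.max.records" is a made-up name for illustration, not a real Hadoop property):

import org.apache.hadoop.mapred.JobConf;

public class SetLongDemo {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // store a long under an arbitrary configuration key
    conf.setLong("demo.max.records", 5000L); // hypothetical key
    // read it back; the second argument is the fallback default
    long maxRecords = conf.getLong("demo.max.records", 100L);
    System.out.println(maxRecords); // prints 5000
  }
}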
Example 1: init
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
      GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);
  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {
    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
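Note how the value written with setLong above is consumed: Hadoop's Groups service reads the same key back and converts seconds to milliseconds for its cache timeout. A simplified sketch of that lookup (illustrative, not the verbatim Hadoop source; 300 is the documented default):

// inside Groups: read the refresh interval the test set via setLong
long cacheTimeoutMs =
    conf.getLong("hadoop.security.groups.cache.secs", 300) * 1000;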
Example 2: getSplits
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
/**
 * Build a CompositeInputSplit from the child InputFormats by assigning the
 * ith split from each child to the ith composite split.
 */
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
  setFormat(job);
  job.setLong("mapred.min.split.size", Long.MAX_VALUE);
  return root.getSplits(job, numSplits);
}
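Setting "mapred.min.split.size" to Long.MAX_VALUE is the key trick here: each child FileInputFormat then computes a split size that covers the whole file, so every child yields one split per file and the ith splits can be joined positionally. A simplified sketch of the classic split-size rule (not the verbatim Hadoop source):

// split size = max(minSize, min(goalSize, blockSize));
// with minSize == Long.MAX_VALUE the outer max() always wins,
// so a single split spans the entire file
static long computeSplitSize(long goalSize, long minSize, long blockSize) {
  return Math.max(minSize, Math.min(goalSize, blockSize));
}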
Example 3: testMemoryMerge
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
@Test(timeout=10000)
public void testMemoryMerge() throws Exception {
  final int TOTAL_MEM_BYTES = 10000;
  final int OUTPUT_SIZE = 7950;
  JobConf conf = new JobConf();
  conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 1.0f);
  conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, TOTAL_MEM_BYTES);
  conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.8f);
  conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 0.9f);
  TestExceptionReporter reporter = new TestExceptionReporter();
  CyclicBarrier mergeStart = new CyclicBarrier(2);
  CyclicBarrier mergeComplete = new CyclicBarrier(2);
  StubbedMergeManager mgr = new StubbedMergeManager(conf, reporter,
      mergeStart, mergeComplete);

  // reserve enough map output to cause a merge when it is committed
  MapOutput<Text, Text> out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
      (out1 instanceof InMemoryMapOutput));
  InMemoryMapOutput<Text, Text> mout1 = (InMemoryMapOutput<Text, Text>) out1;
  fillOutput(mout1);
  MapOutput<Text, Text> out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
      (out2 instanceof InMemoryMapOutput));
  InMemoryMapOutput<Text, Text> mout2 = (InMemoryMapOutput<Text, Text>) out2;
  fillOutput(mout2);

  // next reservation should be a WAIT
  MapOutput<Text, Text> out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertEquals("Should be told to wait", null, out3);

  // trigger the first merge and wait for merge thread to start merging
  // and free enough output to reserve more
  mout1.commit();
  mout2.commit();
  mergeStart.await();
  Assert.assertEquals(1, mgr.getNumMerges());

  // reserve enough map output to cause another merge when committed
  out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
      (out1 instanceof InMemoryMapOutput));
  mout1 = (InMemoryMapOutput<Text, Text>) out1;
  fillOutput(mout1);
  out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
      (out2 instanceof InMemoryMapOutput));
  mout2 = (InMemoryMapOutput<Text, Text>) out2;
  fillOutput(mout2);

  // next reservation should be null
  out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertEquals("Should be told to wait", null, out3);

  // commit output *before* merge thread completes
  mout1.commit();
  mout2.commit();

  // allow the first merge to complete
  mergeComplete.await();

  // start the second merge and verify
  mergeStart.await();
  Assert.assertEquals(2, mgr.getNumMerges());

  // trigger the end of the second merge
  mergeComplete.await();
  Assert.assertEquals(2, mgr.getNumMerges());
  Assert.assertEquals("exception reporter invoked",
      0, reporter.getNumExceptions());
}
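The constants in this test line up with the in-memory merge thresholds that the merge manager derives from the three settings; a sketch of the arithmetic (assuming the usual Hadoop 2.x MergeManagerImpl formulas):

long memoryLimit = (long) (10000 * 1.0f); // REDUCE_MEMORY_TOTAL_BYTES * SHUFFLE_INPUT_BUFFER_PERCENT
long singleShuffleLimit = (long) (memoryLimit * 0.8f); // 8000: a 7950-byte output still fits in memory
long mergeThreshold = (long) (memoryLimit * 0.9f);     // 9000: two committed outputs (15900) start a merge
// two reserved outputs use 15900 bytes > memoryLimit, so a third reserve() must wait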
Example 4: createDataJoinJob
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public static JobConf createDataJoinJob(String args[]) throws IOException {
  String inputDir = args[0];
  String outputDir = args[1];
  Class inputFormat = SequenceFileInputFormat.class;
  if (args[2].compareToIgnoreCase("text") != 0) {
    System.out.println("Using SequenceFileInputFormat: " + args[2]);
  } else {
    System.out.println("Using TextInputFormat: " + args[2]);
    inputFormat = TextInputFormat.class;
  }
  int numOfReducers = Integer.parseInt(args[3]);
  Class mapper = getClassByName(args[4]);
  Class reducer = getClassByName(args[5]);
  Class mapoutputValueClass = getClassByName(args[6]);
  Class outputFormat = TextOutputFormat.class;
  Class outputValueClass = Text.class;
  if (args[7].compareToIgnoreCase("text") != 0) {
    System.out.println("Using SequenceFileOutputFormat: " + args[7]);
    outputFormat = SequenceFileOutputFormat.class;
    outputValueClass = getClassByName(args[7]);
  } else {
    System.out.println("Using TextOutputFormat: " + args[7]);
  }
  long maxNumOfValuesPerGroup = 100;
  String jobName = "";
  if (args.length > 8) {
    maxNumOfValuesPerGroup = Long.parseLong(args[8]);
  }
  if (args.length > 9) {
    jobName = args[9];
  }
  Configuration defaults = new Configuration();
  JobConf job = new JobConf(defaults, DataJoinJob.class);
  job.setJobName("DataJoinJob: " + jobName);
  FileSystem fs = FileSystem.get(defaults);
  fs.delete(new Path(outputDir), true);
  FileInputFormat.setInputPaths(job, inputDir);
  job.setInputFormat(inputFormat);
  job.setMapperClass(mapper);
  FileOutputFormat.setOutputPath(job, new Path(outputDir));
  job.setOutputFormat(outputFormat);
  SequenceFileOutputFormat.setOutputCompressionType(job,
      SequenceFile.CompressionType.BLOCK);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(mapoutputValueClass);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(outputValueClass);
  job.setReducerClass(reducer);
  job.setNumMapTasks(1);
  job.setNumReduceTasks(numOfReducers);
  job.setLong("datajoin.maxNumOfValuesPerGroup", maxNumOfValuesPerGroup);
  return job;
}
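A hypothetical invocation, following the argument order the method parses above (all paths and class names below are placeholders; the mapper, reducer, and map-output value classes would be your own DataJoin subclasses):

// args: inputDir outputDir inputFormat numReducers mapper reducer
//       mapOutputValueClass outputFormat [maxValuesPerGroup] [jobName]
String[] args = {
    "/data/in", "/data/out",
    "text",                        // TextInputFormat
    "2",                           // reducers
    "com.example.MyJoinMapper",    // hypothetical DataJoinMapperBase subclass
    "com.example.MyJoinReducer",   // hypothetical DataJoinReducerBase subclass
    "com.example.MyTaggedOutput",  // hypothetical TaggedMapOutput subclass
    "text",                        // TextOutputFormat
    "100", "demo-join"
};
JobConf job = createDataJoinJob(args);
JobClient.runJob(job);  // submit and wait via the classic mapred API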