This article collects typical usage examples of the Java class org.apache.hadoop.io.LongWritable. If you are unsure what LongWritable is for, how to use it, or want to see it used in real code, the curated class code examples below may help.
The LongWritable class belongs to the org.apache.hadoop.io package. 15 code examples of the LongWritable class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
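Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of LongWritable's core API: it wraps a primitive long, serializes itself through the Writable interface, and implements WritableComparable so it can serve as a MapReduce key.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;

public class LongWritableBasics {
  public static void main(String[] args) throws IOException {
    // Construct and mutate a LongWritable.
    LongWritable a = new LongWritable(42L);
    a.set(a.get() + 1);

    // Writables serialize themselves via DataOutput/DataInput.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    a.write(new DataOutputStream(bytes));

    LongWritable b = new LongWritable();
    b.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    // LongWritable is also WritableComparable, so it sorts as a key.
    System.out.println(a.compareTo(b)); // prints 0
  }
}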
Example 1: run
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: wordmean <in> <out>");
    return 1; // non-zero exit code for a usage error
  }

  Configuration conf = getConf();

  Job job = Job.getInstance(conf, "word mean");
  job.setJarByClass(WordMean.class);
  job.setMapperClass(WordMeanMapper.class);
  job.setCombinerClass(WordMeanReducer.class);
  job.setReducerClass(WordMeanReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(LongWritable.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  Path outputpath = new Path(args[1]);
  FileOutputFormat.setOutputPath(job, outputpath);

  boolean result = job.waitForCompletion(true);
  mean = readAndCalcMean(outputpath, conf);
  return (result ? 0 : 1);
}
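This run() method assumes the enclosing WordMean class extends Configured and implements Tool (so getConf() and the mean/readAndCalcMean members are available). As a rough sketch, such a driver is typically launched through ToolRunner:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class WordMeanDriver {
  public static void main(String[] args) throws Exception {
    // ToolRunner parses generic Hadoop options and then invokes run(args).
    int exitCode = ToolRunner.run(new Configuration(), new WordMean(), args);
    System.exit(exitCode);
  }
}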
Example 2: map
import org.apache.hadoop.io.LongWritable; // import the required package/class

public void map(LongWritable key, Text val,
    OutputCollector<LongWritable, Text> output, Reporter reporter)
    throws IOException {
  String str = val.toString();
  LOG.debug("MAP key:" + key + " value:" + str);
  if (MAPPER_BAD_RECORDS.get(0).equals(str)) {
    LOG.warn("MAP Encountered BAD record");
    System.exit(-1);
  } else if (MAPPER_BAD_RECORDS.get(1).equals(str)) {
    LOG.warn("MAP Encountered BAD record");
    throw new RuntimeException("Bad record " + str);
  } else if (MAPPER_BAD_RECORDS.get(2).equals(str)) {
    try {
      LOG.warn("MAP Encountered BAD record");
      Thread.sleep(15 * 60 * 1000); // hang for 15 minutes
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
  output.collect(key, val);
}
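The mapper above deliberately simulates three failure modes (a hard exit, an exception, and a hang) so the framework's bad-record handling can be exercised. As a rough sketch, and assuming the old mapred API that this example uses, a driver could enable record skipping like this; the thresholds chosen here are placeholders:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SkipBadRecords;

public class SkippingConfig {
  public static JobConf withSkipping() {
    JobConf conf = new JobConf();
    // Start skipping after two failed task attempts, and tolerate up to
    // ten skipped records per map task (both values are placeholders).
    SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
    SkipBadRecords.setMapperMaxSkipRecords(conf, 10);
    return conf;
  }
}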
Example 3: testStandAloneClient
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Test(timeout = 60000)
public void testStandAloneClient() throws IOException {
  Client client = new Client(LongWritable.class, conf);
  InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
  try {
    client.call(new LongWritable(RANDOM.nextLong()),
        address, null, null, 0, conf);
    fail("Expected an exception to have been thrown");
  } catch (IOException e) {
    String message = e.getMessage();
    String addressText = address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in " + message,
        message.contains(addressText));
    Throwable cause = e.getCause();
    assertNotNull("No nested exception in " + e, cause);
    String causeText = cause.getMessage();
    assertTrue("Did not find " + causeText + " in " + message,
        message.contains(causeText));
  }
}
Example 4: reduce
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Override
protected void reduce(LongWritable key, Iterable<Text> values, Context context)
    throws IOException, InterruptedException {
  int k = context.getConfiguration().getInt("k", -1);
  double[] result = new double[k];
  // Sum the k-dimensional vectors element-wise.
  for (Text value : values) {
    String[] ai = value.toString().split(",");
    for (int j = 0; j < k; j++) {
      result[j] += Double.parseDouble(ai[j]);
    }
  }
  // Re-serialize the summed vector as a comma-separated string.
  StringBuilder res = new StringBuilder(prefix);
  for (int i = 0; i < k; i++) {
    res.append(result[i]);
    if (i < k - 1) {
      res.append(",");
    }
  }
  context.write(key, new Text(res.toString()));
}
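This reducer expects each map output value to be a comma-separated vector of length k (read from the "k" configuration entry) and emits the element-wise sum; note that `prefix` is a field of the enclosing reducer, not shown in the example. A hypothetical mapper producing compatible values might look like the following; the tab-separated input layout is an assumption, not part of the original example:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper emitting (groupId, "v1,v2,...,vk") pairs for the
// reducer above. It assumes input lines of the form "groupId<TAB>v1,v2,...".
public class VectorSumMapper
    extends Mapper<LongWritable, Text, LongWritable, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    String[] parts = value.toString().split("\t", 2);
    context.write(new LongWritable(Long.parseLong(parts[0])),
        new Text(parts[1]));
  }
}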
Example 5: map
import org.apache.hadoop.io.LongWritable; // import the required package/class

public void map(LongWritable key, Text val, Context c)
    throws IOException, InterruptedException {
  // Create a whole bunch of objects.
  List<Integer> lst = new ArrayList<Integer>();
  for (int i = 0; i < 20000; i++) {
    lst.add(new Integer(i));
  }
  // Actually use this list, to ensure that it isn't just optimized away.
  int sum = 0;
  for (int x : lst) {
    sum += x;
  }
  // Throw away the list and run a GC.
  lst = null;
  System.gc();
  c.write(new LongWritable(sum), val);
}
Example 6: runIOTest
import org.apache.hadoop.io.LongWritable; // import the required package/class

private void runIOTest(
    Class<? extends Mapper<Text, LongWritable, Text, Text>> mapperClass,
    Path outputDir) throws IOException {
  JobConf job = new JobConf(config, TestDFSIO.class);

  FileInputFormat.setInputPaths(job, getControlDir(config));
  job.setInputFormat(SequenceFileInputFormat.class);
  job.setMapperClass(mapperClass);
  job.setReducerClass(AccumulatingReducer.class);

  FileOutputFormat.setOutputPath(job, outputDir);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);
  JobClient.runJob(job);
}
Example 7: run
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Override
public void run() {
  for (int i = 0; i < count; i++) {
    try {
      final long param = RANDOM.nextLong();
      LongWritable value = call(client, param, server, conf);
      if (value.get() != param) {
        LOG.fatal("Call failed!");
        failed = true;
        break;
      }
    } catch (Exception e) {
      LOG.fatal("Caught: " + StringUtils.stringifyException(e));
      failed = true;
    }
  }
}
Example 8: testStandAloneClient
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Test(timeout = 60000)
public void testStandAloneClient() throws IOException {
  Client client = new Client(LongWritable.class, conf);
  InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
  try {
    call(client, RANDOM.nextLong(), address, conf);
    fail("Expected an exception to have been thrown");
  } catch (IOException e) {
    String message = e.getMessage();
    String addressText = address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in " + message,
        message.contains(addressText));
    Throwable cause = e.getCause();
    assertNotNull("No nested exception in " + e, cause);
    String causeText = cause.getMessage();
    assertTrue("Did not find " + causeText + " in " + message,
        message.contains(causeText));
  } finally {
    client.stop();
  }
}
Example 9: testIpcConnectTimeout
import org.apache.hadoop.io.LongWritable; // import the required package/class

@Test(timeout = 60000)
public void testIpcConnectTimeout() throws IOException {
  // Create a server but intentionally do not start it,
  // so the client gets a connection timeout.
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Start a client with a short connect timeout.
  Client.setConnectTimeout(conf, 100);
  Client client = new Client(LongWritable.class, conf);
  // Set the rpc timeout to twice the MIN_SLEEP_TIME.
  try {
    call(client, new LongWritable(RANDOM.nextLong()), addr,
        MIN_SLEEP_TIME * 2, conf);
    fail("Expected an exception to have been thrown");
  } catch (SocketTimeoutException e) {
    LOG.info("Got a SocketTimeoutException ", e);
  }
  client.stop();
}
Example 10: getSplits
import org.apache.hadoop.io.LongWritable; // import the required package/class

public List<InputSplit> getSplits(JobContext job) throws IOException {
  Configuration conf = job.getConfiguration();
  Path src = new Path(conf.get(INDIRECT_INPUT_FILE, null));
  FileSystem fs = src.getFileSystem(conf);

  List<InputSplit> splits = new ArrayList<InputSplit>();
  LongWritable key = new LongWritable();
  Text value = new Text();
  // Each record in the control file names a path (value) and a
  // length (key); turn every record into an IndirectSplit.
  for (SequenceFile.Reader sl = new SequenceFile.Reader(fs, src, conf);
      sl.next(key, value);) {
    splits.add(new IndirectSplit(new Path(value.toString()), key.get()));
  }
  return splits;
}
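As a rough sketch of the other side of this contract, the control file that getSplits() reads could be produced as below; the paths and lengths are placeholders, and the (deprecated) SequenceFile.createWriter overload is chosen to match the Reader constructor used above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class WriteControlFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path src = new Path("indirect-input.seq"); // placeholder path
    FileSystem fs = src.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, src, LongWritable.class, Text.class);
    try {
      // One record per input file: (length, path); values are placeholders.
      writer.append(new LongWritable(1024L), new Text("/data/part-00000"));
      writer.append(new LongWritable(2048L), new Text("/data/part-00001"));
    } finally {
      writer.close();
    }
  }
}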
Example 11: testChainReduceNoOuptut
import org.apache.hadoop.io.LongWritable; // import the required package/class

/**
 * Tests a reducer that consumes its output.
 *
 * @throws Exception
 */
public void testChainReduceNoOuptut() throws Exception {
  Configuration conf = createJobConf();
  String expectedOutput = "";

  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 1, input);
  job.setJobName("chain");

  ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
      LongWritable.class, Text.class, null);
  ChainReducer.setReducer(job, ConsumeReduce.class, LongWritable.class,
      Text.class, LongWritable.class, Text.class, null);
  ChainReducer.addMapper(job, Mapper.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  job.waitForCompletion(true);
  assertTrue("Job failed", job.isSuccessful());
  assertEquals("Outputs doesn't match", expectedOutput, MapReduceTestUtil
      .readOutput(outDir, conf));
}
Example 12: testChainFail
import org.apache.hadoop.io.LongWritable; // import the required package/class

/**
 * Tests one of the chained mappers throwing an exception.
 *
 * @throws Exception
 */
public void testChainFail() throws Exception {
  Configuration conf = createJobConf();

  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0, input);
  job.setJobName("chain");

  ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, null);
  ChainMapper.addMapper(job, FailMap.class, LongWritable.class, Text.class,
      IntWritable.class, Text.class, null);
  ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  job.waitForCompletion(true);
  assertTrue("Job Not failed", !job.isSuccessful());
}
Example 13: seekTest
import org.apache.hadoop.io.LongWritable; // import the required package/class

public static void seekTest(FileSystem fs, boolean fastCheck)
    throws Exception {
  fs.delete(READ_DIR, true);

  JobConf job = new JobConf(conf, TestFileSystem.class);
  job.setBoolean("fs.test.fastCheck", fastCheck);

  FileInputFormat.setInputPaths(job, CONTROL_DIR);
  job.setInputFormat(SequenceFileInputFormat.class);
  job.setMapperClass(SeekMapper.class);
  job.setReducerClass(LongSumReducer.class);

  FileOutputFormat.setOutputPath(job, READ_DIR);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(LongWritable.class);
  job.setNumReduceTasks(1);
  JobClient.runJob(job);
}
Example 14: testNestedIterable
import org.apache.hadoop.io.LongWritable; // import the required package/class

public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
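The `makeTuple` and `verifIter` helpers belong to the surrounding test class and are not shown here. For orientation, a minimal sketch of reading values back out of a TupleWritable by position; it uses only the public `size()`/`get(int)` accessors (the iterator only visits positions the join framework has marked as written), and the package `org.apache.hadoop.mapred.join` is an assumption:

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.join.TupleWritable;

public class TupleDump {
  // Prints each positional value of a tuple built from an array
  // like `writs` above.
  static void dumpTuple(Writable[] writs) {
    TupleWritable tuple = new TupleWritable(writs);
    for (int i = 0; i < tuple.size(); i++) {
      System.out.println(i + ": " + tuple.get(i));
    }
  }
}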
Example 15: map
import org.apache.hadoop.io.LongWritable; // import the required package/class

/**
 * Emits a random word sequence of the desired size. Note that the desired
 * output size is passed as the value parameter to this map.
 */
@Override
public void map(NullWritable key, LongWritable value, Context context)
    throws IOException, InterruptedException {
  // TODO Control the extra data written ..
  // TODO Should the key\tvalue\n be considered for measuring size?
  //      Can counters like BYTES_WRITTEN be used? What will be the value of
  //      such counters in LocalJobRunner?
  for (long bytes = value.get(); bytes > 0;) {
    String randomKey = rtg.getRandomWord();
    String randomValue = rtg.getRandomWord();
    context.write(new Text(randomKey), new Text(randomValue));
    bytes -= (randomValue.getBytes(charsetUTF8).length +
        randomKey.getBytes(charsetUTF8).length);
  }
}