This article collects typical usage examples of the Java method org.apache.hadoop.io.NullWritable.get. If you are wondering how exactly to use NullWritable.get in Java, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of its containing class, org.apache.hadoop.io.NullWritable.
The following shows 15 code examples of the NullWritable.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
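As background for the examples below (this note and the snippet are an addition, not taken from any of the listed projects): NullWritable is a singleton Writable with a zero-byte serialized form, so NullWritable.get() always returns the same shared instance. A minimal sketch:

import org.apache.hadoop.io.NullWritable;

public class NullWritableDemo {
  public static void main(String[] args) {
    // get() is the only way to obtain an instance; the constructor is private.
    NullWritable a = NullWritable.get();
    NullWritable b = NullWritable.get();
    System.out.println(a == b);  // true: always the same singleton
    // Its write()/readFields() methods are no-ops, so a NullWritable key
    // or value adds zero bytes per record to the serialized output.
  }
}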
Example 1: writeOutput
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
private void writeOutput(TaskAttempt attempt, Configuration conf)
    throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
      TypeConverter.fromYarn(attempt.getID()));
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  NullWritable nullWritable = NullWritable.get();
  try {
    // Mix real keys/values with null and NullWritable to exercise
    // TextOutputFormat's handling of empty keys and values.
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(tContext);
  }
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
Example 2: getRecordReader
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
public RecordReader<NullWritable, NullWritable> getRecordReader(
    InputSplit ignored, JobConf conf, Reporter reporter) {
  // A reader that emits exactly one empty record, then reports end of input.
  return new RecordReader<NullWritable, NullWritable>() {
    private boolean done = false;
    public boolean next(NullWritable key, NullWritable value)
        throws IOException {
      if (done)
        return false;
      done = true;
      return true;
    }
    public NullWritable createKey() { return NullWritable.get(); }
    public NullWritable createValue() { return NullWritable.get(); }
    public long getPos() throws IOException { return 0L; }
    public void close() throws IOException { }
    public float getProgress() throws IOException { return 0.0f; }
  };
}
Example 3: testTotalOrderMemCmp
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
public void testTotalOrderMemCmp() throws Exception {
  TotalOrderPartitioner<Text, NullWritable> partitioner =
      new TotalOrderPartitioner<Text, NullWritable>();
  Configuration conf = new Configuration();
  Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
      "totalordermemcmp", conf, splitStrings);
  conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
  try {
    partitioner.setConf(conf);
    NullWritable nw = NullWritable.get();
    for (Check<Text> chk : testStrings) {
      assertEquals(chk.data.toString(), chk.part,
          partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
    }
  } finally {
    p.getFileSystem(conf).delete(p, true);
  }
}
Example 4: writeOutput
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(null);
  }
}
Example 5: createKey
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
/**
 * Request a new key from the proxied RecordReader.
 */
@SuppressWarnings("unchecked")
public K createKey() {
  if (keyclass != null) {
    return (K) ReflectionUtils.newInstance(keyclass, conf);
  }
  return (K) NullWritable.get();
}
Example 6: nextKeyValue
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (readOver) {
    return false;
  }
  key = NullWritable.get();
  value = (PeInputSplit) split;
  readOver = true;
  return true;
}
Example 7: getRecordReader
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public org.apache.hadoop.mapred.RecordReader<IntWritable, NullWritable>
    getRecordReader(final org.apache.hadoop.mapred.InputSplit split,
        JobConf job, Reporter reporter) throws IOException {
  return new org.apache.hadoop.mapred.RecordReader<IntWritable, NullWritable>() {
    private final IntWritable i =
        new IntWritable(((MapredSequentialSplit) split).getInit());
    private int maxVal = i.get() + maxDepth + 1;
    @Override
    public boolean next(IntWritable key, NullWritable value)
        throws IOException {
      i.set(i.get() + 1);
      return i.get() < maxVal;
    }
    @Override
    public IntWritable createKey() {
      return new IntWritable(i.get());
    }
    @Override
    public NullWritable createValue() {
      return NullWritable.get();
    }
    @Override
    public long getPos() throws IOException {
      return 0;
    }
    @Override
    public void close() throws IOException {
    }
    @Override
    public float getProgress() throws IOException {
      return 0;
    }
  };
}
Example 8: createRecordReader
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public RecordReader<NullWritable, NullWritable> createRecordReader(
    InputSplit split,
    TaskAttemptContext tac) throws IOException, InterruptedException {
  return new SingleRecordReader<NullWritable, NullWritable>(
      NullWritable.get(), NullWritable.get());
}
Example 9: testTotalOrderCustomComparator
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
public void testTotalOrderCustomComparator() throws Exception {
  TotalOrderPartitioner<Text, NullWritable> partitioner =
      new TotalOrderPartitioner<Text, NullWritable>();
  Configuration conf = new Configuration();
  Text[] revSplitStrings = Arrays.copyOf(splitStrings, splitStrings.length);
  Arrays.sort(revSplitStrings, new ReverseStringComparator());
  Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
      "totalordercustomcomparator", conf, revSplitStrings);
  conf.setBoolean(TotalOrderPartitioner.NATURAL_ORDER, false);
  conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
  conf.setClass(MRJobConfig.KEY_COMPARATOR,
      ReverseStringComparator.class, RawComparator.class);
  ArrayList<Check<Text>> revCheck = new ArrayList<Check<Text>>();
  revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
  revCheck.add(new Check<Text>(new Text("aaabb"), 9));
  revCheck.add(new Check<Text>(new Text("aabbb"), 9));
  revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
  revCheck.add(new Check<Text>(new Text("babbb"), 8));
  revCheck.add(new Check<Text>(new Text("baabb"), 8));
  revCheck.add(new Check<Text>(new Text("yai"), 1));
  revCheck.add(new Check<Text>(new Text("yak"), 1));
  revCheck.add(new Check<Text>(new Text("z"), 0));
  revCheck.add(new Check<Text>(new Text("ddngo"), 4));
  revCheck.add(new Check<Text>(new Text("hi"), 3));
  try {
    partitioner.setConf(conf);
    NullWritable nw = NullWritable.get();
    for (Check<Text> chk : revCheck) {
      assertEquals(chk.data.toString(), chk.part,
          partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
    }
  } finally {
    p.getFileSystem(conf).delete(p, true);
  }
}
Example 10: getCurrentValue
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
public NullWritable getCurrentValue() {
  return NullWritable.get();
}
Example 11: getCurrentKey
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public NullWritable getCurrentKey() throws IOException, InterruptedException {
  return NullWritable.get();
}
Example 12: getCurrentValue
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public NullWritable getCurrentValue() {
  return NullWritable.get();
}
Example 13: getCurrentValue
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public NullWritable getCurrentValue() throws IOException, InterruptedException {
  return NullWritable.get();
}
Example 14: getCurrentValue
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Override
public NullWritable getCurrentValue() { return NullWritable.get(); }
Example 15: testFormat
import org.apache.hadoop.io.NullWritable; // import the package/class the method depends on
@Test
public void testFormat() throws Exception {
  JobConf job = new JobConf();
  job.set(JobContext.TASK_ATTEMPT_ID, attempt);
  FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
  FileOutputFormat.setWorkOutputPath(job, workDir);
  FileSystem fs = workDir.getFileSystem(job);
  if (!fs.mkdirs(workDir)) {
    fail("Failed to create output directory");
  }
  String file = "test_format.txt";
  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  TextOutputFormat<Object, Object> theOutputFormat =
      new TextOutputFormat<Object, Object>();
  RecordWriter<Object, Object> theRecordWriter =
      theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }
  // A null or NullWritable key/value is dropped from the line, so only six of
  // the eight writes above produce output, and the tab separator appears only
  // when both key and value are present.
  File expectedFile = new File(new Path(workDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(expectedOutput.toString(), output);
}