This article collects typical usage examples of the Java method org.apache.hadoop.mapred.FileOutputFormat.getOutputPath. If you are asking what FileOutputFormat.getOutputPath does, how to call it, or what real code that uses it looks like, the curated examples here should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.FileOutputFormat.
Below are 8 code examples of the FileOutputFormat.getOutputPath method, sorted by popularity.
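Before the examples, a minimal sketch of how getOutputPath pairs with setOutputPath on a JobConf (the path here is illustrative, not taken from the examples):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class OutputPathDemo {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // setOutputPath records the directory in the job configuration
    // (the classic "mapred.output.dir" property) ...
    FileOutputFormat.setOutputPath(job, new Path("/tmp/demo-output"));
    // ... and getOutputPath reads it back, returning null if unset.
    Path out = FileOutputFormat.getOutputPath(job);
    System.out.println(out); // prints /tmp/demo-output
  }
}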
Example 1: getRecordWriter
This example dumps each value as one line of text into a per-task file under the job output directory.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(
    final FileSystem fs, JobConf job, String name,
    final Progressable progress) throws IOException {
  // Resolve the per-task dump file directly under the job output directory.
  final Path segmentDumpFile = new Path(
      FileOutputFormat.getOutputPath(job), name);

  // Get the old copy out of the way.
  if (fs.exists(segmentDumpFile))
    fs.delete(segmentDumpFile, true);

  final PrintStream printStream = new PrintStream(
      fs.create(segmentDumpFile));
  return new RecordWriter<WritableComparable<?>, Writable>() {
    public synchronized void write(WritableComparable<?> key, Writable value)
        throws IOException {
      // Keys are ignored; each value is written as one line of text.
      printStream.println(value);
    }

    public synchronized void close(Reporter reporter) throws IOException {
      printStream.close();
    }
  };
}
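Note that this writer creates its file directly in the final output directory, bypassing the output committer's temporary staging. Under that assumption, duplicate (speculative) task attempts would collide on the same file, so a driver using such a format would typically disable speculative execution before submission:

// Hedged sketch: with writers that create files directly under
// FileOutputFormat.getOutputPath(job), duplicate task attempts collide,
// so turn speculative execution off on the JobConf.
job.setSpeculativeExecution(false);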
Example 2: getRecordWriter
This example writes DynamoDB export records, optionally compressed with the configured codec.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
@Override
public RecordWriter<NullWritable, DynamoDBItemWritable> getRecordWriter(FileSystem ignored,
    JobConf job, String name, Progressable progress) throws IOException {
  boolean isCompressed = getCompressOutput(job);
  CompressionCodec codec = null;
  String extension = "";
  DataOutputStream fileOut;
  if (isCompressed) {
    // Default to gzip unless another codec is configured on the job.
    Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job);
    extension = codec.getDefaultExtension();
  }
  // Name the per-task file after the task, with the codec's extension if any.
  Path file = new Path(FileOutputFormat.getOutputPath(job), name + extension);
  FileSystem fs = file.getFileSystem(job);
  if (!isCompressed) {
    fileOut = fs.create(file, progress);
  } else {
    fileOut = new DataOutputStream(codec.createOutputStream(fs.create(file, progress)));
  }
  return new ExportRecordWriter(fileOut);
}
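For the compressed branch above to fire, the driver must enable output compression on the job. A minimal sketch (GzipCodec is simply the default this example already falls back to):

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class CompressionSetup {
  public static JobConf configure() {
    JobConf job = new JobConf();
    // Makes getCompressOutput(job) return true in the format above ...
    FileOutputFormat.setCompressOutput(job, true);
    // ... and selects the codec that getOutputCompressorClass(job, ...) returns.
    FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
    return job;
  }
}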
Example 3: checkOutputSpecs
This example rejects the job if the segment under the output directory has already been parsed.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
public void checkOutputSpecs(FileSystem fs, JobConf job) throws IOException {
  Path out = FileOutputFormat.getOutputPath(job);
  if ((out == null) && (job.getNumReduceTasks() != 0)) {
    throw new InvalidJobConfException(
        "Output directory not set in JobConf.");
  }
  if (out == null) {
    // Map-only job with no output directory: nothing to check
    // (also avoids a null dereference below).
    return;
  }
  if (fs == null) {
    fs = out.getFileSystem(job);
  }
  // Refuse to overwrite a segment that has already been parsed.
  if (fs.exists(new Path(out, CrawlDatum.PARSE_DIR_NAME)))
    throw new IOException("Segment already parsed!");
}
Example 4: checkOutputSpecs
This example rejects the job if the segment under the output directory has already been fetched.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
public void checkOutputSpecs(FileSystem fs, JobConf job) throws IOException {
  Path out = FileOutputFormat.getOutputPath(job);
  if ((out == null) && (job.getNumReduceTasks() != 0)) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (out == null) {
    // Map-only job with no output directory: nothing to check
    // (also avoids a null dereference below).
    return;
  }
  if (fs == null) {
    fs = out.getFileSystem(job);
  }
  // Refuse to overwrite a segment that has already been fetched.
  if (fs.exists(new Path(out, CrawlDatum.FETCH_DIR_NAME)))
    throw new IOException("Segment already fetched!");
}
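The framework invokes checkOutputSpecs during job submission, before any task runs, so checks like Examples 3 and 4 fail fast. A hypothetical driver (the output-format class name and segment path are illustrative, not from the examples):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class SegmentDriverSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    // Hypothetical: assume Example 4 lives in a class named FetcherOutputFormat.
    job.setOutputFormat(FetcherOutputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path("crawl/segments/20240101"));
    // checkOutputSpecs runs during submission, so a rerun over the same
    // segment throws "Segment already fetched!" before any task starts.
    JobClient.runJob(job);
  }
}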
Example 5: getRecordWriter
This example writes an export manifest at the task output path, recording the extension used for the compressed data files.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
@Override
public RecordWriter<K, Text> getRecordWriter(FileSystem ignored, JobConf job, String name,
    Progressable progress) throws IOException {
  String extension = "";
  // The manifest is written via the task output path so the committer can
  // promote it into the final output directory when the task commits.
  Path file = FileOutputFormat.getTaskOutputPath(job, MANIFEST_FILENAME);
  FileSystem fs = file.getFileSystem(job);
  FSDataOutputStream fileOut = fs.create(file, progress);
  if (getCompressOutput(job)) {
    // The manifest itself stays uncompressed; the codec only determines
    // the extension recorded for the compressed data files.
    Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
    extension = codec.getDefaultExtension();
  }
  return new ExportManifestRecordWriter<>(fileOut, FileOutputFormat.getOutputPath(job),
      extension);
}
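Example 5 is the only snippet here that mixes getOutputPath with getTaskOutputPath, and the distinction matters. A short fragment illustrating the two (the "manifest" name is illustrative):

// Given a JobConf "job" inside a FileOutputFormat subclass:
// the final job output directory, e.g. hdfs://namenode/out
Path finalDir = FileOutputFormat.getOutputPath(job);
// a per-task temporary file under the committer's staging area; the
// FileOutputCommitter promotes it into finalDir when the task commits
Path taskFile = FileOutputFormat.getTaskOutputPath(job, "manifest");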
Example 6: writeFile
This example creates an empty marker file in the job output directory.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
private void writeFile(JobConf conf, String filename) throws IOException {
  System.out.println("writing file ---- " + filename);
  Path outputPath = FileOutputFormat.getOutputPath(conf);
  FileSystem fs = outputPath.getFileSystem(conf);
  // Create an empty marker file directly in the job output directory.
  fs.create(new Path(outputPath, filename)).close();
}
Example 7: getRecordWriter
This example fans each NutchWritable out to fetch, content, and parse writers under the job output directory.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
public RecordWriter<Text, NutchWritable> getRecordWriter(final FileSystem fs,
    final JobConf job, final String name, final Progressable progress)
    throws IOException {
  Path out = FileOutputFormat.getOutputPath(job);
  // Per-task MapFile directories for fetch status and raw content.
  final Path fetch = new Path(new Path(out, CrawlDatum.FETCH_DIR_NAME), name);
  final Path content = new Path(new Path(out, Content.DIR_NAME), name);

  final CompressionType compType = SequenceFileOutputFormat
      .getOutputCompressionType(job);

  // Writer options are fully qualified because MapFile.Writer and
  // SequenceFile.Writer both define an Option type.
  Option fKeyClassOpt = MapFile.Writer.keyClass(Text.class);
  org.apache.hadoop.io.SequenceFile.Writer.Option fValClassOpt = SequenceFile.Writer.valueClass(CrawlDatum.class);
  org.apache.hadoop.io.SequenceFile.Writer.Option fProgressOpt = SequenceFile.Writer.progressable(progress);
  org.apache.hadoop.io.SequenceFile.Writer.Option fCompOpt = SequenceFile.Writer.compression(compType);

  final MapFile.Writer fetchOut = new MapFile.Writer(job,
      fetch, fKeyClassOpt, fValClassOpt, fCompOpt, fProgressOpt);

  return new RecordWriter<Text, NutchWritable>() {
    private MapFile.Writer contentOut;
    private RecordWriter<Text, Parse> parseOut;

    // Instance initializer: open the optional content and parse writers
    // only when the job is configured to store content or parse inline.
    {
      if (Fetcher.isStoringContent(job)) {
        Option cKeyClassOpt = MapFile.Writer.keyClass(Text.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option cValClassOpt = SequenceFile.Writer.valueClass(Content.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option cProgressOpt = SequenceFile.Writer.progressable(progress);
        org.apache.hadoop.io.SequenceFile.Writer.Option cCompOpt = SequenceFile.Writer.compression(compType);
        contentOut = new MapFile.Writer(job, content,
            cKeyClassOpt, cValClassOpt, cCompOpt, cProgressOpt);
      }

      if (Fetcher.isParsing(job)) {
        parseOut = new ParseOutputFormat().getRecordWriter(fs, job, name,
            progress);
      }
    }

    public void write(Text key, NutchWritable value) throws IOException {
      Writable w = value.get();
      // Route each record to the writer that matches its payload type.
      if (w instanceof CrawlDatum)
        fetchOut.append(key, w);
      else if (w instanceof Content && contentOut != null)
        contentOut.append(key, w);
      else if (w instanceof Parse && parseOut != null)
        parseOut.write(key, (Parse) w);
    }

    public void close(Reporter reporter) throws IOException {
      fetchOut.close();
      if (contentOut != null) {
        contentOut.close();
      }
      if (parseOut != null) {
        parseOut.close(reporter);
      }
    }
  };
}
Example 8: getRecordWriter
This example writes key/value pairs as text lines to a per-task file under the job output directory.
import org.apache.hadoop.mapred.FileOutputFormat; // import the package/class the method depends on
public RecordWriter<Text, CrawlDatum> getRecordWriter(FileSystem fs,
    JobConf job, String name, Progressable progress) throws IOException {
  // One plain-text output file per task, directly under the output directory.
  Path dir = FileOutputFormat.getOutputPath(job);
  DataOutputStream fileOut = fs.create(new Path(dir, name), progress);
  return new LineRecordWriter(fileOut);
}
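Finally, a hedged end-to-end sketch of the driver side shared by all eight examples (class name and paths are illustrative): the driver sets the output directory once, and whichever FileOutputFormat subclass the job uses reads it back via getOutputPath:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;

public class DriverSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(DriverSketch.class);
    job.setJobName("getOutputPath-demo");
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    // Any FileOutputFormat subclass (including the custom formats above)
    // resolves its task files against this directory via getOutputPath(job).
    job.setOutputFormat(TextOutputFormat.class);
    FileInputFormat.addInputPath(job, new Path("in"));
    FileOutputFormat.setOutputPath(job, new Path("out"));
    JobClient.runJob(job);
  }
}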