本文整理汇总了Java中org.apache.trevni.avro.HadoopInput类的典型用法代码示例。如果您正苦于以下问题:Java HadoopInput类的具体用法?Java HadoopInput怎么用?Java HadoopInput使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
HadoopInput类属于org.apache.trevni.avro包,在下文中一共展示了HadoopInput类的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getAvroSchema
import org.apache.trevni.avro.HadoopInput; //导入依赖的package包/类
@Override
public Schema getAvroSchema(Path p, final Job job) throws IOException {
FileSystem fs = FileSystem.get(p.toUri(), job.getConfiguration());
FileStatus[] statusArray = fs.globStatus(p, VISIBLE_FILES);
if (statusArray == null) {
throw new IOException("Path " + p.toString() + " does not exist.");
}
if (statusArray.length == 0) {
throw new IOException("No path matches pattern " + p.toString());
}
Path filePath = depthFirstSearchForFile(statusArray, fs);
if (filePath == null) {
throw new IOException("No path matches pattern " + p.toString());
}
AvroColumnReader.Params params =
new AvroColumnReader.Params(
new HadoopInput(filePath, job.getConfiguration()));
AvroColumnReader<GenericData.Record> reader =
new AvroColumnReader<GenericData.Record>(params);
Schema s = reader.getFileSchema();
reader.close();
return s;
}
示例2: getAvroSchema
import org.apache.trevni.avro.HadoopInput; //导入依赖的package包/类
@Override
public Schema getAvroSchema(Path p[], final Job job) throws IOException {
ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
FileSystem fs = FileSystem.get(p[0].toUri(), job.getConfiguration());
for (Path temp : p) {
for (FileStatus tempf : fs.globStatus(temp, Utils.VISIBLE_FILES)) {
statusList.add(tempf);
}
}
FileStatus[] statusArray = (FileStatus[]) statusList
.toArray(new FileStatus[statusList.size()]);
if (statusArray == null) {
throw new IOException("Path " + p.toString() + " does not exist.");
}
if (statusArray.length == 0) {
throw new IOException("No path matches pattern " + p.toString());
}
Path filePath = Utils.depthFirstSearchForFile(statusArray, fs);
if (filePath == null) {
throw new IOException("No path matches pattern " + p.toString());
}
AvroColumnReader.Params params =
new AvroColumnReader.Params(
new HadoopInput(filePath, job.getConfiguration()));
AvroColumnReader<GenericData.Record> reader =
new AvroColumnReader<GenericData.Record>(params);
Schema s = reader.getFileSchema();
reader.close();
return s;
}
示例3: TrevniScanner
import org.apache.trevni.avro.HadoopInput; //导入依赖的package包/类
/**
 * Creates a scanner over a Trevni column file for the given fragment.
 *
 * <p>Delegates common setup to the superclass, then opens a
 * {@code ColumnFileReader} on the fragment's path. {@code HadoopInput} adapts
 * the Hadoop file to Trevni's input interface.
 *
 * @param conf     Hadoop configuration used to open the file
 * @param schema   table schema for the scanned data (project type — note this
 *                 is not an Avro {@code Schema}; confirm against the superclass)
 * @param meta     table metadata passed through to the superclass
 * @param fragment file fragment identifying the path to read
 * @throws IOException if the underlying column file cannot be opened
 */
public TrevniScanner(Configuration conf, Schema schema, TableMeta meta, FileFragment fragment) throws IOException {
  super(conf, schema, meta, fragment);
  // 'reader' is a field declared outside this snippet.
  reader = new ColumnFileReader(new HadoopInput(fragment.getPath(), conf));
}