本文整理汇总了Java中org.apache.orc.TypeDescription.createRowBatch方法的典型用法代码示例。如果您正苦于以下问题:Java TypeDescription.createRowBatch方法的具体用法?Java TypeDescription.createRowBatch怎么用?Java TypeDescription.createRowBatch使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.orc.TypeDescription
的用法示例。
在下文中一共展示了TypeDescription.createRowBatch方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: open
import org.apache.orc.TypeDescription; //导入方法依赖的package包/类
@Override
public OrcWriter<T> open(Path path) {
    // Build writer options lazily from the configuration when the caller
    // supplied none, then layer on the optional compression / buffer settings.
    if (writerOptions == null) {
        writerOptions = OrcFile.writerOptions(configuration);
    }
    if (compressionKind != null) {
        writerOptions.compress(compressionKind);
    }
    if (bufferSize != 0) {
        writerOptions.bufferSize(bufferSize);
    }
    // The schema drives both the ORC file layout and the row batch shape.
    final TypeDescription schema = getTypeDescription();
    writerOptions.setSchema(schema);
    try {
        writer = OrcFile.createWriter(path, writerOptions);
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
    vectorizedRowBatch = schema.createRowBatch(batchSize);
    specialCaseSetup();
    return this;
}
示例2: PentahoOrcRecordWriter
import org.apache.orc.TypeDescription; //导入方法依赖的package包/类
/**
 * Creates an ORC record writer for the given schema and target file.
 *
 * @param schemaDescription Pentaho field descriptions used to build the output row meta.
 * @param schema            ORC type description driving the file layout and row batch.
 * @param filePath          destination path for the ORC file.
 * @param conf              Hadoop configuration for the writer.
 * @throws RuntimeException if the underlying ORC writer cannot be created.
 */
public PentahoOrcRecordWriter( SchemaDescription schemaDescription, TypeDescription schema, String filePath,
                               Configuration conf ) {
  this.schemaDescription = schemaDescription;
  this.schema = schema;
  final AtomicInteger fieldNumber = new AtomicInteger(); //Mutable field count, incremented by setOutputMeta
  schemaDescription.forEach( field -> setOutputMeta( fieldNumber, field ) );
  outputRowMetaAndData = new RowMetaAndData( outputRowMeta, new Object[ fieldNumber.get() ] );
  try {
    writer = OrcFile.createWriter( new Path( filePath ),
      OrcFile.writerOptions( conf )
        .setSchema( schema ) );
    batch = schema.createRowBatch();
  } catch ( IOException e ) {
    logger.error( e );
    // Fail fast: the original merely logged here, leaving 'writer' null and
    // guaranteeing an NPE in the OrcMetaDataWriter call below.
    throw new RuntimeException( "Unable to create ORC writer for " + filePath, e );
  }
  //Write the addition metadata for the fields
  new OrcMetaDataWriter( writer ).write( schemaDescription );
}
示例3: BaseIteractor
import org.apache.orc.TypeDescription; //导入方法依赖的package包/类
// Wraps an ORC RecordReader and eagerly loads the first batch of rows.
BaseIteractor(RecordReader recordReader, TypeDescription schema, int size) throws IOException {
    this.schema = schema;
    this.recordReader = recordReader;
    // Allocate one reusable column batch sized to the caller's request,
    // then prime the iterator with the first batch of data.
    this.batch = schema.createRowBatch(size);
    this.nextBatch();
}
示例4: flush
import org.apache.orc.TypeDescription; //导入方法依赖的package包/类
/**
 * Writes every record remaining in the segment to an ORC file at {@code path}.
 *
 * <p>Fixes two defects in the original: the batch-full flush lived inside the
 * per-column loop (a mid-row reset could discard a partially written row), and
 * the final flush / {@code writer.close()} / {@code setFilePath} lived inside
 * the record loop, so the writer was closed after the first record and later
 * iterations wrote to a closed writer.
 *
 * @param segment source of rows; each row is a String[] with one value per column.
 * @param path    destination file path, recorded on the segment on success.
 * @param schema  ORC schema; all columns are written as bytes columns.
 * @return true on success, false if an IOException occurred (it is printed, not rethrown).
 */
private boolean flush(BufferSegment segment, String path, TypeDescription schema)
{
    Configuration conf = new Configuration();
    try {
        Writer writer = OrcFile.createWriter(new Path(path),
                OrcFile.writerOptions(conf)
                        .setSchema(schema)
                        .stripeSize(orcFileStripeSize)
                        .bufferSize(orcFileBufferSize)
                        .blockSize(orcFileBlockSize)
                        .compress(CompressionKind.ZLIB)
                        .version(OrcFile.Version.V_0_12));
        VectorizedRowBatch batch = schema.createRowBatch();
        while (segment.hasNext()) {
            String[] contents = segment.getNext();
            int row = batch.size++;
            // NOTE(review): getBytes() uses the platform default charset —
            // consider StandardCharsets.UTF_8 for deterministic encoding.
            for (int i = 0; i < contents.length; i++) {
                ((BytesColumnVector) batch.cols[i]).setVal(row, contents[i].getBytes());
            }
            // Flush only once the whole row is populated.
            if (batch.size == batch.getMaxSize()) {
                writer.addRowBatch(batch);
                batch.reset();
            }
        }
        // Drain any final partial batch, then close exactly once.
        if (batch.size != 0) {
            writer.addRowBatch(batch);
            batch.reset();
        }
        writer.close();
        segment.setFilePath(path);
        System.out.println("path : " + path);
        return true;
    }
    catch (IOException e) {
        e.printStackTrace();
        return false;
    }
}