This article collects typical usage examples of the Java method parquet.hadoop.metadata.ColumnChunkMetaData.getValueCount, drawn from open-source projects. If you are wondering what ColumnChunkMetaData.getValueCount does and how to use it, the hand-picked code examples below should help. You may also want to look further into the containing class, parquet.hadoop.metadata.ColumnChunkMetaData.
Three code examples of ColumnChunkMetaData.getValueCount are shown below, sorted by popularity by default.
Example 1: showDetails
import com.google.common.base.Joiner;                // Guava's Joiner, used below
import parquet.hadoop.metadata.ColumnChunkMetaData;  // import the package/class the method depends on
import parquet.tools.util.PrettyPrintWriter;         // pretty-print writer from parquet-tools
private static void showDetails(PrettyPrintWriter out, ColumnChunkMetaData meta, boolean name) {
  long doff = meta.getDictionaryPageOffset();
  long foff = meta.getFirstDataPageOffset();
  long tsize = meta.getTotalSize();              // total compressed size on disk
  long usize = meta.getTotalUncompressedSize();
  long count = meta.getValueCount();             // number of values in this column chunk
  double ratio = usize / (double) tsize;         // uncompressed-to-compressed ratio
  String encodings = Joiner.on(',').skipNulls().join(meta.getEncodings());

  if (name) {
    String path = Joiner.on('.').skipNulls().join(meta.getPath());
    out.format("%s: ", path);
  }

  out.format(" %s", meta.getType());
  out.format(" %s", meta.getCodec());
  out.format(" DO:%d", doff);    // dictionary page offset
  out.format(" FPO:%d", foff);   // first data page offset
  out.format(" SZ:%d/%d/%.2f", tsize, usize, ratio);
  out.format(" VC:%d", count);   // value count
  if (!encodings.isEmpty()) out.format(" ENC:%s", encodings);
  out.println();
}
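Not part of the original example, but for context, here is a minimal sketch of how such a dump might be driven end to end: read a file's footer and walk every row group and column chunk, printing each chunk's value count. The class name ValueCountDump is invented for illustration; the snippet assumes the old parquet.* (pre-org.apache) artifacts, in which ParquetFileReader.readFooter(Configuration, Path) returns the footer metadata.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import parquet.hadoop.ParquetFileReader;
import parquet.hadoop.metadata.BlockMetaData;
import parquet.hadoop.metadata.ColumnChunkMetaData;
import parquet.hadoop.metadata.ParquetMetadata;

public class ValueCountDump {
  public static void main(String[] args) throws Exception {
    // Read only the footer; no row data is deserialized.
    ParquetMetadata footer = ParquetFileReader.readFooter(new Configuration(), new Path(args[0]));
    for (BlockMetaData block : footer.getBlocks()) {           // one block per row group
      for (ColumnChunkMetaData column : block.getColumns()) {  // one chunk per column per row group
        System.out.printf("%s VC:%d%n", column.getPath(), column.getValueCount());
      }
    }
  }
}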
Example 2: addRowGroup
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import parquet.format.ColumnChunk;
import parquet.format.RowGroup;
import parquet.hadoop.metadata.BlockMetaData;
import parquet.hadoop.metadata.ColumnChunkMetaData;  // import the package/class the method depends on
import parquet.hadoop.metadata.ParquetMetadata;
private void addRowGroup(ParquetMetadata parquetMetadata, List<RowGroup> rowGroups, BlockMetaData block) {
  // rowGroup.total_byte_size = ;
  List<ColumnChunkMetaData> columns = block.getColumns();
  List<ColumnChunk> parquetColumns = new ArrayList<ColumnChunk>();
  for (ColumnChunkMetaData columnMetaData : columns) {
    ColumnChunk columnChunk = new ColumnChunk(columnMetaData.getFirstDataPageOffset()); // verify this is the right offset
    columnChunk.file_path = block.getPath(); // they are in the same file for now
    columnChunk.meta_data = new parquet.format.ColumnMetaData(
        getType(columnMetaData.getType()),
        toFormatEncodings(columnMetaData.getEncodings()),
        Arrays.asList(columnMetaData.getPath().toArray()),
        columnMetaData.getCodec().getParquetCompressionCodec(),
        columnMetaData.getValueCount(),              // num_values comes from getValueCount()
        columnMetaData.getTotalUncompressedSize(),
        columnMetaData.getTotalSize(),
        columnMetaData.getFirstDataPageOffset());
    columnChunk.meta_data.dictionary_page_offset = columnMetaData.getDictionaryPageOffset();
    if (!columnMetaData.getStatistics().isEmpty()) {
      columnChunk.meta_data.setStatistics(toParquetStatistics(columnMetaData.getStatistics()));
    }
    // columnChunk.meta_data.index_page_offset = ;
    // columnChunk.meta_data.key_value_metadata = ; // nothing yet
    parquetColumns.add(columnChunk);
  }
  RowGroup rowGroup = new RowGroup(parquetColumns, block.getTotalByteSize(), block.getRowCount());
  rowGroups.add(rowGroup);
}
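A note added for clarity: the long positional constructor above mirrors the required fields of the Thrift ColumnMetaData struct in parquet-format (type, encodings, path_in_schema, codec, num_values, total_uncompressed_size, total_compressed_size, data_page_offset), so getValueCount() is what populates num_values in the serialized footer; dictionary_page_offset and statistics are optional fields and are therefore set separately afterwards.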
Example 3: isAllNulls
import parquet.hadoop.metadata.ColumnChunkMetaData;  // import the package/class the method depends on
// A column chunk is all nulls when its null count (from statistics) equals its total value count.
private boolean isAllNulls(ColumnChunkMetaData column) {
  return column.getStatistics().getNumNulls() == column.getValueCount();
}
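As a usage illustration (a hypothetical extension, not taken from the source), the same statistics-versus-value-count comparison can be lifted to a whole row group, for example to skip blocks in which every column is entirely null. The class and method names below are invented for the sketch; note the isEmpty() guard, since getNumNulls() is only trustworthy when statistics were actually written.
import parquet.column.statistics.Statistics;
import parquet.hadoop.metadata.BlockMetaData;
import parquet.hadoop.metadata.ColumnChunkMetaData;

public final class NullChunkFilter {
  private NullChunkFilter() {}

  // True when every column chunk in the block consists solely of nulls.
  public static boolean allColumnsNull(BlockMetaData block) {
    for (ColumnChunkMetaData column : block.getColumns()) {
      Statistics<?> stats = column.getStatistics();
      // Without statistics we cannot prove the chunk is all nulls, so stay conservative.
      if (stats.isEmpty() || stats.getNumNulls() != column.getValueCount()) {
        return false;
      }
    }
    return !block.getColumns().isEmpty();
  }
}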