This article collects typical usage examples of the Java method ColumnChunkMetaData.get from org.apache.parquet.hadoop.metadata.ColumnChunkMetaData. If you are unsure what ColumnChunkMetaData.get does or how to call it, the hand-picked code examples below should help; you can also explore the enclosing class org.apache.parquet.hadoop.metadata.ColumnChunkMetaData for further usage.
Six code examples of ColumnChunkMetaData.get are shown below, ordered roughly by popularity.
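For orientation, here is a minimal, self-contained sketch of the call pattern the examples share. The roles of the trailing long arguments (first data page offset, dictionary page offset, value count, compressed and uncompressed sizes) and all concrete values are inferred from the examples below rather than taken from the official Javadoc, so treat them as assumptions.

import java.util.Arrays;
import java.util.HashSet;
import org.apache.parquet.column.Encoding;
import org.apache.parquet.column.statistics.IntStatistics;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ColumnPath;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;

public class ColumnChunkMetaDataSketch {
  // Builds one column-chunk metadata entry. The trailing long arguments are,
  // as inferred from the examples below: first data page offset, dictionary
  // page offset, value count, total compressed size, total uncompressed size.
  static ColumnChunkMetaData sketch() {
    IntStatistics stats = new IntStatistics();   // empty min/max/null-count statistics
    return ColumnChunkMetaData.get(
        ColumnPath.get("foo"),                   // dotted column path
        PrimitiveTypeName.INT32,                 // physical type of the column
        CompressionCodecName.GZIP,               // codec used for the chunk's pages
        new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
        stats,
        4L,                                      // first data page offset (hypothetical value)
        0L,                                      // dictionary page offset (hypothetical value)
        1000L,                                   // value count (hypothetical value)
        512L,                                    // total compressed size (hypothetical value)
        1024L);                                  // total uncompressed size (hypothetical value)
  }
}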
Example 1: makeBlockFromStats
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
public static BlockMetaData makeBlockFromStats(IntStatistics stats, long valueCount) {
  BlockMetaData blockMetaData = new BlockMetaData();
  ColumnChunkMetaData column = ColumnChunkMetaData.get(ColumnPath.get("foo"),
      PrimitiveTypeName.INT32,
      CompressionCodecName.GZIP,
      new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
      stats,
      100L, 100L, valueCount, 100L, 100L);
  blockMetaData.addColumn(column);
  blockMetaData.setTotalByteSize(200L);
  blockMetaData.setRowCount(valueCount);
  return blockMetaData;
}
Example 2: newBlock
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
private BlockMetaData newBlock(long start, long compressedBlockSize) {
  BlockMetaData blockMetaData = new BlockMetaData();
  long uncompressedSize = compressedBlockSize * 2; // assuming the compression ratio is 2
  ColumnChunkMetaData column = ColumnChunkMetaData.get(ColumnPath.get("foo"),
      PrimitiveTypeName.BINARY,
      CompressionCodecName.GZIP,
      new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
      new BinaryStatistics(),
      start, 0L, 0L, compressedBlockSize, uncompressedSize);
  blockMetaData.addColumn(column);
  blockMetaData.setTotalByteSize(uncompressedSize);
  return blockMetaData;
}
Example 3: getIntColumnMeta
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
private static ColumnChunkMetaData getIntColumnMeta(IntStatistics stats, long valueCount) {
  return ColumnChunkMetaData.get(ColumnPath.get("int", "column"),
      PrimitiveTypeName.INT32,
      CompressionCodecName.GZIP,
      new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
      stats,
      0L, 0L, valueCount, 0L, 0L);
}
Example 4: getDoubleColumnMeta
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
private static ColumnChunkMetaData getDoubleColumnMeta(DoubleStatistics stats, long valueCount) {
  return ColumnChunkMetaData.get(ColumnPath.get("double", "column"),
      PrimitiveTypeName.DOUBLE,
      CompressionCodecName.GZIP,
      new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
      stats,
      0L, 0L, valueCount, 0L, 0L);
}
Example 5: createColumnChunkMetaData
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
private ColumnChunkMetaData createColumnChunkMetaData() {
  Set<org.apache.parquet.column.Encoding> e = new HashSet<org.apache.parquet.column.Encoding>();
  PrimitiveTypeName t = PrimitiveTypeName.BINARY;
  ColumnPath p = ColumnPath.get("foo");
  CompressionCodecName c = CompressionCodecName.GZIP;
  BinaryStatistics s = new BinaryStatistics();
  ColumnChunkMetaData md = ColumnChunkMetaData.get(p, t, c, e, s,
      0, 0, 0, 0, 0);
  return md;
}
Example 6: fromParquetMetadata
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; // import the package/class this method depends on
public ParquetMetadata fromParquetMetadata(FileMetaData parquetMetadata) throws IOException {
  MessageType messageType = fromParquetSchema(parquetMetadata.getSchema(), parquetMetadata.getColumn_orders());
  List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
  List<RowGroup> row_groups = parquetMetadata.getRow_groups();
  if (row_groups != null) {
    for (RowGroup rowGroup : row_groups) {
      BlockMetaData blockMetaData = new BlockMetaData();
      blockMetaData.setRowCount(rowGroup.getNum_rows());
      blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
      List<ColumnChunk> columns = rowGroup.getColumns();
      String filePath = columns.get(0).getFile_path();
      for (ColumnChunk columnChunk : columns) {
        if ((filePath == null && columnChunk.getFile_path() != null)
            || (filePath != null && !filePath.equals(columnChunk.getFile_path()))) {
          throw new ParquetDecodingException("all column chunks of the same row group must be in the same file for now");
        }
        ColumnMetaData metaData = columnChunk.meta_data;
        ColumnPath path = getPath(metaData);
        ColumnChunkMetaData column = ColumnChunkMetaData.get(
            path,
            messageType.getType(path.toArray()).asPrimitiveType(),
            fromFormatCodec(metaData.codec),
            convertEncodingStats(metaData.getEncoding_stats()),
            fromFormatEncodings(metaData.encodings),
            fromParquetStatistics(
                parquetMetadata.getCreated_by(),
                metaData.statistics,
                messageType.getType(path.toArray()).asPrimitiveType()),
            metaData.data_page_offset,
            metaData.dictionary_page_offset,
            metaData.num_values,
            metaData.total_compressed_size,
            metaData.total_uncompressed_size);
        // TODO
        // index_page_offset
        // key_value_metadata
        blockMetaData.addColumn(column);
      }
      blockMetaData.setPath(filePath);
      blocks.add(blockMetaData);
    }
  }
  Map<String, String> keyValueMetaData = new HashMap<String, String>();
  List<KeyValue> key_value_metadata = parquetMetadata.getKey_value_metadata();
  if (key_value_metadata != null) {
    for (KeyValue keyValue : key_value_metadata) {
      keyValueMetaData.put(keyValue.key, keyValue.value);
    }
  }
  return new ParquetMetadata(
      new org.apache.parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, parquetMetadata.getCreated_by()),
      blocks);
}
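As a follow-up to Example 6, here is a small sketch of how the converted ParquetMetadata might be inspected afterwards. The getBlocks(), getColumns(), and ColumnChunkMetaData getter calls are standard parquet-mr accessors; the dump method and the printed format are illustrative only.

import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;

public class FooterInspector {
  // Walks the footer produced by the converter and prints per-column-chunk details.
  static void dump(ParquetMetadata footer) {
    for (BlockMetaData block : footer.getBlocks()) {            // one entry per row group
      System.out.println("row group: " + block.getRowCount() + " rows");
      for (ColumnChunkMetaData column : block.getColumns()) {   // one entry per column chunk
        System.out.println("  " + column.getPath()
            + " values=" + column.getValueCount()
            + " compressed=" + column.getTotalSize()
            + " uncompressed=" + column.getTotalUncompressedSize()
            + " codec=" + column.getCodec());
      }
    }
  }
}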