

Java BlockMetaData.setRowCount Method Code Examples

This article collects typical usage examples of the Java method parquet.hadoop.metadata.BlockMetaData.setRowCount. If you are wondering how BlockMetaData.setRowCount is used in practice, or what real calls to it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, parquet.hadoop.metadata.BlockMetaData.


Three code examples of the BlockMetaData.setRowCount method are shown below, ordered by popularity by default.
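Before the full examples, here is a minimal, self-contained sketch of the pattern all three share: create an empty BlockMetaData for a row group, then record its row count and total byte size. The numeric values below are arbitrary illustrative numbers, not taken from the examples.

import parquet.hadoop.metadata.BlockMetaData;

public class SetRowCountSketch {
    public static void main(String[] args) {
        BlockMetaData blockMetaData = new BlockMetaData();
        blockMetaData.setRowCount(1000L);        // number of rows in this row group
        blockMetaData.setTotalByteSize(4096L);   // total byte size of the row group
        System.out.println(blockMetaData.getRowCount()); // prints 1000
    }
}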

Example 1: makeBlockFromStats

import parquet.hadoop.metadata.BlockMetaData; // import the package/class the method depends on
public static BlockMetaData makeBlockFromStats(IntStatistics stats, long valueCount) {
    BlockMetaData blockMetaData = new BlockMetaData();

    // Build a single INT32 column chunk whose statistics come from the caller.
    // The offsets and sizes (100L) are placeholder values for the test.
    ColumnChunkMetaData column = ColumnChunkMetaData.get(ColumnPath.get("foo"),
            PrimitiveTypeName.INT32,
            CompressionCodecName.GZIP,
            new HashSet<Encoding>(Arrays.asList(Encoding.PLAIN)),
            stats,
            100L, 100L, valueCount, 100L, 100L);
    blockMetaData.addColumn(column);
    blockMetaData.setTotalByteSize(200L);
    blockMetaData.setRowCount(valueCount); // one row per value in this single-column block
    return blockMetaData;
}
 
Developer: grokcoder, Project: pbase, Lines: 15, Source: TestInputFormat.java
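A short usage sketch for the helper above; the min/max and counts are made-up illustrative values:

IntStatistics stats = new IntStatistics();  // from parquet.column.statistics
stats.setMinMax(1, 100);                    // hypothetical column min/max
BlockMetaData block = makeBlockFromStats(stats, 1000L);
System.out.println(block.getRowCount());    // prints 1000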

Example 2: fromParquetMetadata

import parquet.hadoop.metadata.BlockMetaData; // import the package/class the method depends on
public ParquetMetadata fromParquetMetadata(FileMetaData parquetMetadata) throws IOException {
    MessageType messageType = fromParquetSchema(parquetMetadata.getSchema());
    List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
    List<RowGroup> row_groups = parquetMetadata.getRow_groups();
    if (row_groups != null) {
        for (RowGroup rowGroup : row_groups) {
            // Each Thrift RowGroup becomes one BlockMetaData.
            BlockMetaData blockMetaData = new BlockMetaData();
            blockMetaData.setRowCount(rowGroup.getNum_rows());
            blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
            List<ColumnChunk> columns = rowGroup.getColumns();
            String filePath = columns.get(0).getFile_path();
            for (ColumnChunk columnChunk : columns) {
                // All column chunks of a row group must live in the same file.
                if ((filePath == null && columnChunk.getFile_path() != null)
                        || (filePath != null && !filePath.equals(columnChunk.getFile_path()))) {
                    throw new ParquetDecodingException("all column chunks of the same row group must be in the same file for now");
                }
                parquet.format.ColumnMetaData metaData = columnChunk.meta_data;
                ColumnPath path = getPath(metaData);
                ColumnChunkMetaData column = ColumnChunkMetaData.get(
                        path,
                        messageType.getType(path.toArray()).asPrimitiveType().getPrimitiveTypeName(),
                        CompressionCodecName.fromParquet(metaData.codec),
                        fromFormatEncodings(metaData.encodings),
                        fromParquetStatistics(metaData.statistics, messageType.getType(path.toArray()).asPrimitiveType().getPrimitiveTypeName()),
                        metaData.data_page_offset,
                        metaData.dictionary_page_offset,
                        metaData.num_values,
                        metaData.total_compressed_size,
                        metaData.total_uncompressed_size);
                // TODO
                // index_page_offset
                // key_value_metadata
                blockMetaData.addColumn(column);
            }
            blockMetaData.setPath(filePath);
            blocks.add(blockMetaData);
        }
    }
    // Copy the file-level key/value metadata into a plain map.
    Map<String, String> keyValueMetaData = new HashMap<String, String>();
    List<KeyValue> key_value_metadata = parquetMetadata.getKey_value_metadata();
    if (key_value_metadata != null) {
        for (KeyValue keyValue : key_value_metadata) {
            keyValueMetaData.put(keyValue.key, keyValue.value);
        }
    }
    return new ParquetMetadata(
            new parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, parquetMetadata.getCreated_by()),
            blocks);
}
 
Developer: grokcoder, Project: pbase, Lines: 50, Source: ParquetMetadataConverter.java

Example 3: readFooter

import parquet.hadoop.metadata.BlockMetaData; // import the package/class the method depends on
public static ParquetMetadata readFooter(Configuration configuration, Path file)
        throws IOException
{
    FileSystem fileSystem = file.getFileSystem(configuration);
    FileStatus fileStatus = fileSystem.getFileStatus(file);
    try (FSDataInputStream inputStream = fileSystem.open(file)) {
        // Parquet File Layout:
        //
        // MAGIC
        // variable: Data
        // variable: Metadata
        // 4 bytes: MetadataLength
        // MAGIC

        long length = fileStatus.getLen();
        checkArgument(length >= MAGIC.length + PARQUET_METADATA_LENGTH + MAGIC.length, "%s is not a valid Parquet File", file);
        long metadataLengthIndex = length - PARQUET_METADATA_LENGTH - MAGIC.length;

        inputStream.seek(metadataLengthIndex);
        int metadataLength = readIntLittleEndian(inputStream);

        byte[] magic = new byte[MAGIC.length];
        inputStream.readFully(magic);
        checkArgument(Arrays.equals(MAGIC, magic),
                "Not valid Parquet file: %s expected magic number: %s got: %s", file, Arrays.toString(MAGIC), Arrays.toString(magic));

        long metadataIndex = metadataLengthIndex - metadataLength;
        checkArgument(metadataIndex >= MAGIC.length && metadataIndex < metadataLengthIndex,
                "Corrupted Parquet file: %s metadata index: %s out of range", file, metadataIndex);
        inputStream.seek(metadataIndex);
        FileMetaData fileMetaData = readFileMetaData(inputStream);
        List<SchemaElement> schema = fileMetaData.getSchema();
        checkArgument(!schema.isEmpty(), "Empty Parquet schema in file: %s", file);

        MessageType messageType = readParquetSchema(schema);
        List<BlockMetaData> blocks = new ArrayList<>();
        List<RowGroup> rowGroups = fileMetaData.getRow_groups();
        if (rowGroups != null) {
            for (RowGroup rowGroup : rowGroups) {
                BlockMetaData blockMetaData = new BlockMetaData();
                blockMetaData.setRowCount(rowGroup.getNum_rows());
                blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
                List<ColumnChunk> columns = rowGroup.getColumns();
                checkArgument(!columns.isEmpty(), "No columns in row group: %s", rowGroup);
                String filePath = columns.get(0).getFile_path();
                for (ColumnChunk columnChunk : columns) {
                    checkArgument((filePath == null && columnChunk.getFile_path() == null)
                                    || (filePath != null && filePath.equals(columnChunk.getFile_path())),
                            "all column chunks of the same row group must be in the same file");
                    ColumnMetaData metaData = columnChunk.meta_data;
                    String[] path = metaData.path_in_schema.toArray(new String[metaData.path_in_schema.size()]);
                    ColumnPath columnPath = ColumnPath.get(path);
                    ColumnChunkMetaData column = ColumnChunkMetaData.get(
                            columnPath,
                            messageType.getType(columnPath.toArray()).asPrimitiveType().getPrimitiveTypeName(),
                            CompressionCodecName.fromParquet(metaData.codec),
                            readEncodings(metaData.encodings),
                            readStats(metaData.statistics, messageType.getType(columnPath.toArray()).asPrimitiveType().getPrimitiveTypeName()),
                            metaData.data_page_offset,
                            metaData.dictionary_page_offset,
                            metaData.num_values,
                            metaData.total_compressed_size,
                            metaData.total_uncompressed_size);
                    blockMetaData.addColumn(column);
                }
                blockMetaData.setPath(filePath);
                blocks.add(blockMetaData);
            }
        }

        Map<String, String> keyValueMetaData = new HashMap<>();
        List<KeyValue> keyValueList = fileMetaData.getKey_value_metadata();
        if (keyValueList != null) {
            for (KeyValue keyValue : keyValueList) {
                keyValueMetaData.put(keyValue.key, keyValue.value);
            }
        }
        return new ParquetMetadata(new parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, fileMetaData.getCreated_by()), blocks);
    }
}
 
Developer: y-lan, Project: presto, Lines: 81, Source: ParquetMetadataReader.java
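A usage sketch for the reader above, assuming the static readFooter method lives on the ParquetMetadataReader class named in the attribution; the file path is hypothetical:

Configuration conf = new Configuration();
ParquetMetadata metadata = ParquetMetadataReader.readFooter(conf, new Path("/tmp/example.parquet"));
for (BlockMetaData block : metadata.getBlocks()) {
    System.out.println("row group rows: " + block.getRowCount());
}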

