

Java ParquetProperties.WriterVersion Code Examples

This article collects typical usage examples of ParquetProperties.WriterVersion, a nested enum of org.apache.parquet.column.ParquetProperties that selects which Parquet format version a writer produces. If you are unsure what ParquetProperties.WriterVersion is, how to use it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.parquet.column.ParquetProperties.


The following presents 7 code examples of ParquetProperties.WriterVersion, ordered by popularity.
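WriterVersion itself has two constants, PARQUET_1_0 and PARQUET_2_0, which select the format version (and therefore the encodings) a writer uses. As a minimal, illustrative sketch of picking a value before handing it to one of the writers shown below (the class and variable names here are hypothetical):

import org.apache.parquet.column.ParquetProperties;

public class WriterVersionDemo {
  public static void main(String[] args) {
    // The two constants select between the classic v1 format and the newer v2 data pages.
    ParquetProperties.WriterVersion v1 = ParquetProperties.WriterVersion.PARQUET_1_0;
    ParquetProperties.WriterVersion v2 = ParquetProperties.WriterVersion.PARQUET_2_0;

    // It behaves like any other Java enum.
    for (ParquetProperties.WriterVersion v : ParquetProperties.WriterVersion.values()) {
      System.out.println(v.name());
    }
    System.out.println("default choice: " + v1 + ", alternative: " + v2);
  }
}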

Example 1: writeAndTest

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
public static void writeAndTest(WriteContext context) throws IOException {
  // Create the configuration, and then apply the schema to our configuration.
  Configuration configuration = new Configuration();
  GroupWriteSupport.setSchema(context.schema, configuration);
  GroupWriteSupport groupWriteSupport = new GroupWriteSupport();

  // Create the writer properties
  final int blockSize = context.blockSize;
  final int pageSize = context.pageSize;
  final int dictionaryPageSize = pageSize;
  final boolean enableDictionary = context.enableDictionary;
  final boolean enableValidation = context.enableValidation;
  ParquetProperties.WriterVersion writerVersion = context.version;
  CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED;

  ParquetWriter<Group> writer = new ParquetWriter<Group>(context.fsPath,
      groupWriteSupport, codec, blockSize, pageSize, dictionaryPageSize,
      enableDictionary, enableValidation, writerVersion, configuration);

  context.write(writer);
  writer.close();

  context.test();

  context.path.delete();
}
 
Developer: apache | Project: parquet-mr | Lines: 27 | Source: TestStatistics.java
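Because ParquetWriter implements java.io.Closeable, the write/close sequence in Example 1 can also be expressed with try-with-resources so the file is closed even if context.write(...) throws. A sketch of that variant, reusing the WriteContext fields shown in Example 3 (the method name writeAndTestWithAutoClose is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public static void writeAndTestWithAutoClose(WriteContext context) throws IOException {
  Configuration configuration = new Configuration();
  GroupWriteSupport.setSchema(context.schema, configuration);

  // Same (deprecated) ParquetWriter constructor as Example 1, but the writer is
  // closed automatically even when context.write(...) fails.
  try (ParquetWriter<Group> writer = new ParquetWriter<Group>(context.fsPath,
      new GroupWriteSupport(), CompressionCodecName.UNCOMPRESSED,
      context.blockSize, context.pageSize, context.pageSize,
      context.enableDictionary, context.enableValidation,
      context.version, configuration)) {
    context.write(writer);
  }

  context.test();
  context.path.delete();
}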

Example 2: ParquetElementWriter

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
@Deprecated
ParquetElementWriter(final Path file, final WriteSupport<Element> writeSupport,
                     final CompressionCodecName compressionCodecName,
                     final int blockSize, final int pageSize, final boolean enableDictionary,
                     final boolean enableValidation,
                     final ParquetProperties.WriterVersion writerVersion,
                     final Configuration conf)
        throws IOException {
    super(file, writeSupport, compressionCodecName, blockSize, pageSize,
            pageSize, enableDictionary, enableValidation, writerVersion, conf);
}
 
Developer: gchq | Project: Gaffer | Lines: 12 | Source: ParquetElementWriter.java

Example 3: WriteContext

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
public WriteContext(File path, MessageType schema, int blockSize, int pageSize, boolean enableDictionary, boolean enableValidation, ParquetProperties.WriterVersion version) throws IOException {
  this.path = path;
  this.fsPath = new Path(path.toString());
  this.schema = schema;
  this.blockSize = blockSize;
  this.pageSize = pageSize;
  this.enableDictionary = enableDictionary;
  this.enableValidation = enableValidation;
  this.version = version;
}
 
Developer: apache | Project: parquet-mr | Lines: 11 | Source: TestStatistics.java

Example 4: generateData

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
public void generateData(Path outFile, Configuration configuration, ParquetProperties.WriterVersion version,
                         int blockSize, int pageSize, int fixedLenByteArraySize, CompressionCodecName codec, int nRows)
        throws IOException
{
  if (exists(configuration, outFile)) {
    System.out.println("File already exists " + outFile);
    return;
  }

  System.out.println("Generating data @ " + outFile);

  MessageType schema = parseMessageType(
          "message test { "
                  + "required binary binary_field; "
                  + "required int32 int32_field; "
                  + "required int64 int64_field; "
                  + "required boolean boolean_field; "
                  + "required float float_field; "
                  + "required double double_field; "
                  + "required fixed_len_byte_array(" + fixedLenByteArraySize +") flba_field; "
                  + "required int96 int96_field; "
                  + "} ");

  GroupWriteSupport.setSchema(schema, configuration);
  SimpleGroupFactory f = new SimpleGroupFactory(schema);
  ParquetWriter<Group> writer = new ParquetWriter<Group>(outFile, new GroupWriteSupport(), codec, blockSize,
                                                         pageSize, DICT_PAGE_SIZE, true, false, version, configuration);

  //generate some data for the fixed len byte array field
  char[] chars = new char[fixedLenByteArraySize];
  Arrays.fill(chars, '*');

  for (int i = 0; i < nRows; i++) {
    writer.write(
      f.newGroup()
        .append("binary_field", randomUUID().toString())
        .append("int32_field", i)
        .append("int64_field", 64l)
        .append("boolean_field", true)
        .append("float_field", 1.0f)
        .append("double_field", 2.0d)
        .append("flba_field", new String(chars))
        .append("int96_field", Binary.fromConstantByteArray(new byte[12]))
    );
  }
  writer.close();
}
 
Developer: apache | Project: parquet-mr | Lines: 48 | Source: DataGenerator.java
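A small, illustrative driver for the generator above; the output path, sizes, codec, and row count are made-up values, and a no-argument DataGenerator constructor is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public static void main(String[] args) throws Exception {
  DataGenerator generator = new DataGenerator();             // class shown above
  Configuration conf = new Configuration();
  Path outFile = new Path("/tmp/parquet-demo/data.parquet"); // illustrative path

  // 128 MB row groups, 1 MB pages, 1024-byte fixed_len_byte_array values,
  // SNAPPY compression, 10,000 rows, written with the v2 format.
  generator.generateData(outFile, conf, ParquetProperties.WriterVersion.PARQUET_2_0,
      128 * 1024 * 1024, 1024 * 1024, 1024, CompressionCodecName.SNAPPY, 10000);
}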

Example 5: DataContext

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
public DataContext(long seed, File path, int blockSize, int pageSize, boolean enableDictionary, ParquetProperties.WriterVersion version) throws IOException {
  super(path, buildSchema(seed), blockSize, pageSize, enableDictionary, true, version);

  this.random = new Random(seed);
  this.recordCount = random.nextInt(MAX_TOTAL_ROWS);

  int fixedLength = schema.getType("fixed-binary").asPrimitiveType().getTypeLength();

  randomGenerators = Arrays.<RandomValueGenerator<?>>asList(
      new RandomValues.IntGenerator(random.nextLong()),
      new RandomValues.LongGenerator(random.nextLong()),
      new RandomValues.Int96Generator(random.nextLong()),
      new RandomValues.FloatGenerator(random.nextLong()),
      new RandomValues.DoubleGenerator(random.nextLong()),
      new RandomValues.StringGenerator(random.nextLong()),
      new RandomValues.BinaryGenerator(random.nextLong()),
      new RandomValues.FixedGenerator(random.nextLong(), fixedLength),
      new RandomValues.UnconstrainedIntGenerator(random.nextLong()),
      new RandomValues.UnconstrainedLongGenerator(random.nextLong()),
      new RandomValues.UnconstrainedFloatGenerator(random.nextLong()),
      new RandomValues.UnconstrainedDoubleGenerator(random.nextLong()),
      new RandomValues.IntGenerator(random.nextLong(), Byte.MIN_VALUE, Byte.MAX_VALUE),
      new RandomValues.UIntGenerator(random.nextLong(), Byte.MIN_VALUE, Byte.MAX_VALUE),
      new RandomValues.IntGenerator(random.nextLong(), Short.MIN_VALUE, Short.MAX_VALUE),
      new RandomValues.UIntGenerator(random.nextLong(), Short.MIN_VALUE, Short.MAX_VALUE),
      new RandomValues.UnconstrainedIntGenerator(random.nextLong()),
      new RandomValues.UnconstrainedIntGenerator(random.nextLong()),
      new RandomValues.UnconstrainedLongGenerator(random.nextLong()),
      new RandomValues.UnconstrainedLongGenerator(random.nextLong()),
      new RandomValues.UnconstrainedIntGenerator(random.nextLong()),
      new RandomValues.UnconstrainedLongGenerator(random.nextLong()),
      new RandomValues.FixedGenerator(random.nextLong(), fixedLength),
      new RandomValues.BinaryGenerator(random.nextLong()),
      new RandomValues.StringGenerator(random.nextLong()),
      new RandomValues.StringGenerator(random.nextLong()),
      new RandomValues.StringGenerator(random.nextLong()),
      new RandomValues.BinaryGenerator(random.nextLong()),
      new RandomValues.IntGenerator(random.nextLong()),
      new RandomValues.IntGenerator(random.nextLong()),
      new RandomValues.LongGenerator(random.nextLong()),
      new RandomValues.LongGenerator(random.nextLong()),
      new RandomValues.LongGenerator(random.nextLong()),
      new RandomValues.FixedGenerator(random.nextLong(), 12)
  );
}
 
Developer: apache | Project: parquet-mr | Lines: 46 | Source: TestStatistics.java

Example 6: SimpleParquetWriter

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
/**
 * Create a new ParquetWriter.
 *
 * @param file                 the file to create
 * @param writeSupport         the implementation to write a record to a RecordConsumer
 * @param compressionCodecName the compression codec to use
 * @param blockSize            the block size threshold
 * @param pageSize             the page size threshold
 * @param dictionaryPageSize   the page size threshold for the dictionary pages
 * @param enableDictionary     to turn dictionary encoding on
 * @param validating           to turn on validation using the schema
 * @param writerVersion        version of the Parquet format to write, from {@link ParquetProperties.WriterVersion}
 * @param conf                 Hadoop configuration to use while accessing the filesystem
 * @throws IOException if the file cannot be created or written
 */
public SimpleParquetWriter(Path file,
                           WriteSupport<Group> writeSupport,
                           CompressionCodecName compressionCodecName,
                           int blockSize, int pageSize, int dictionaryPageSize,
                           boolean enableDictionary, boolean validating,
                           ParquetProperties.WriterVersion writerVersion,
                           Configuration conf) throws IOException
{
    super(file, writeSupport, compressionCodecName, blockSize, pageSize, dictionaryPageSize, enableDictionary, validating, writerVersion, conf);
}
 
Developer: dbiir | Project: RealtimeAnalysis | Lines: 26 | Source: SimpleParquetWriter.java
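A hedged usage sketch for the constructor above: it registers a throwaway schema with GroupWriteSupport, creates the writer, writes one record, and closes it. The schema, path, and size values are illustrative, and it assumes SimpleParquetWriter extends ParquetWriter<Group>, as the super(...) call suggests:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public static void writeOneRecord() throws IOException {
  MessageType schema = MessageTypeParser.parseMessageType(
      "message demo { required int32 id; required binary name; }");
  Configuration conf = new Configuration();
  GroupWriteSupport.setSchema(schema, conf);         // the write support reads the schema from conf

  SimpleParquetWriter writer = new SimpleParquetWriter(
      new Path("/tmp/simple-demo.parquet"),          // illustrative output path
      new GroupWriteSupport(),
      CompressionCodecName.SNAPPY,
      128 * 1024 * 1024,                             // block (row group) size
      1024 * 1024,                                   // page size
      1024 * 1024,                                   // dictionary page size
      true,                                          // enable dictionary encoding
      false,                                         // no schema validation
      ParquetProperties.WriterVersion.PARQUET_1_0,
      conf);
  writer.write(new SimpleGroupFactory(schema).newGroup().append("id", 1).append("name", "demo"));
  writer.close();
}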

Example 7: ExampleParquetWriter

import org.apache.parquet.column.ParquetProperties; // import the package/class this example depends on
/**
 * Create a new {@link ExampleParquetWriter}.
 *
 * @param file The file name to write to.
 * @param writeSupport The WriteSupport implementation used to convert Group records for writing.
 * @param compressionCodecName Compression codec to use, or CompressionCodecName.UNCOMPRESSED.
 * @param blockSize the block size threshold.
 * @param pageSize the page size threshold; blocks are subdivided into pages for alignment and other purposes.
 * @param enableDictionary Whether to use a dictionary to compress columns.
 * @param enableValidation Whether to validate records against the schema while writing.
 * @param writerVersion Version of the Parquet format to write, from {@link ParquetProperties.WriterVersion}.
 * @param conf The Configuration to use.
 * @throws IOException if the file cannot be created or written.
 */
ExampleParquetWriter(Path file, WriteSupport<Group> writeSupport,
                     CompressionCodecName compressionCodecName,
                     int blockSize, int pageSize, boolean enableDictionary,
                     boolean enableValidation,
                     ParquetProperties.WriterVersion writerVersion,
                     Configuration conf)
    throws IOException {
  super(file, writeSupport, compressionCodecName, blockSize, pageSize,
      pageSize, enableDictionary, enableValidation, writerVersion, conf);
}
 
Developer: apache | Project: parquet-mr | Lines: 23 | Source: ExampleParquetWriter.java
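In recent parquet-mr releases, ExampleParquetWriter also exposes a builder, which is the non-deprecated way to choose a writer version; a minimal sketch, assuming the builder API of a current parquet-mr version:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.ExampleParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public static void writeWithBuilder() throws IOException {
  MessageType schema = MessageTypeParser.parseMessageType("message demo { required int32 id; }");
  Configuration conf = new Configuration();

  // The builder picks the writer version via withWriterVersion(...) instead of the
  // long constructor used in the examples above.
  try (ParquetWriter<Group> writer = ExampleParquetWriter.builder(new Path("/tmp/builder-demo.parquet"))
      .withConf(conf)
      .withType(schema)
      .withCompressionCodec(CompressionCodecName.UNCOMPRESSED)
      .withDictionaryEncoding(true)
      .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0)
      .build()) {
    writer.write(new SimpleGroupFactory(schema).newGroup().append("id", 1));
  }
}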


Note: The org.apache.parquet.column.ParquetProperties.WriterVersion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.