This article collects typical usage examples of the CompressionCodecName.GZIP property from the Java class parquet.hadoop.metadata.CompressionCodecName. If you are wondering what CompressionCodecName.GZIP is for or how to use it, the curated examples below may help; you can also look further into other usage examples of the enclosing class, parquet.hadoop.metadata.CompressionCodecName.
The following presents 3 code examples that use the CompressionCodecName.GZIP property, ordered by popularity by default.
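Before the examples, here is a minimal standalone sketch of where a value like CompressionCodecName.GZIP typically ends up: it is handed to a Parquet writer, which then GZIP-compresses the column chunks it writes. The sketch uses the same pre-Apache parquet namespace as the examples below; the schema, output path, and the exact ParquetWriter constructor overload are illustrative assumptions and vary between parquet-mr versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import parquet.column.ParquetProperties;
import parquet.example.data.Group;
import parquet.example.data.simple.SimpleGroupFactory;
import parquet.hadoop.ParquetWriter;
import parquet.hadoop.example.GroupWriteSupport;
import parquet.hadoop.metadata.CompressionCodecName;
import parquet.schema.MessageType;
import parquet.schema.MessageTypeParser;

public class GzipParquetWriteSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical schema and output path, used only for illustration.
    MessageType schema = MessageTypeParser.parseMessageType(
        "message event { required int64 id; required binary name (UTF8); }");
    Configuration conf = new Configuration();
    GroupWriteSupport.setSchema(schema, conf);

    // The overload taking a Configuration is assumed here; older or newer
    // parquet-mr releases expose slightly different constructors.
    ParquetWriter<Group> writer = new ParquetWriter<Group>(
        new Path("/tmp/events.gz.parquet"),
        new GroupWriteSupport(),
        CompressionCodecName.GZIP,         // GZIP-compress the output
        ParquetWriter.DEFAULT_BLOCK_SIZE,
        ParquetWriter.DEFAULT_PAGE_SIZE,
        ParquetWriter.DEFAULT_PAGE_SIZE,   // dictionary page size
        true,                              // enable dictionary encoding
        false,                             // no extra write validation
        ParquetProperties.WriterVersion.PARQUET_1_0,
        conf);
    try {
      SimpleGroupFactory groups = new SimpleGroupFactory(schema);
      writer.write(groups.newGroup().append("id", 1L).append("name", "first"));
    } finally {
      writer.close();
    }
  }
}

Inspecting the resulting file's metadata (for example with parquet-tools) would show GZIP as the codec of the written column chunks.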
Example 1: init
@Override
public void init(Map<String, String> writerOptions) throws IOException {
  this.location = writerOptions.get("location");
  this.prefix = writerOptions.get("prefix");

  conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, writerOptions.get(FileSystem.FS_DEFAULT_NAME_KEY));

  blockSize = Integer.parseInt(writerOptions.get(ExecConstants.PARQUET_BLOCK_SIZE));
  pageSize = Integer.parseInt(writerOptions.get(ExecConstants.PARQUET_PAGE_SIZE));
  dictionaryPageSize = Integer.parseInt(writerOptions.get(ExecConstants.PARQUET_DICT_PAGE_SIZE));

  // Map the configured compression name onto a Parquet CompressionCodecName.
  String codecName = writerOptions.get(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE).toLowerCase();
  switch (codecName) {
    case "snappy":
      codec = CompressionCodecName.SNAPPY;
      break;
    case "lzo":
      codec = CompressionCodecName.LZO;
      break;
    case "gzip":
      codec = CompressionCodecName.GZIP;
      break;
    case "none":
    case "uncompressed":
      codec = CompressionCodecName.UNCOMPRESSED;
      break;
    default:
      throw new UnsupportedOperationException(String.format("Unknown compression type: %s", codecName));
  }

  enableDictionary = Boolean.parseBoolean(writerOptions.get(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING));
}
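Everything init consumes comes from a plain string map. A hedged usage sketch follows; only the option keys are taken from the snippet above, while the concrete values and the recordWriter variable are made up for illustration.

Map<String, String> writerOptions = new HashMap<String, String>();
writerOptions.put("location", "/tmp/parquet-out");                        // hypothetical output directory
writerOptions.put("prefix", "part");
writerOptions.put(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://namenode:8020");
writerOptions.put(ExecConstants.PARQUET_BLOCK_SIZE, "134217728");         // 128 MB row groups
writerOptions.put(ExecConstants.PARQUET_PAGE_SIZE, "1048576");            // 1 MB data pages
writerOptions.put(ExecConstants.PARQUET_DICT_PAGE_SIZE, "1048576");
writerOptions.put(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE, "gzip"); // selects CompressionCodecName.GZIP
writerOptions.put(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING, "true");
recordWriter.init(writerOptions); // "recordWriter" stands in for an instance of the class declaring init(...)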
Example 2: HMAvroParquetScheme
public HMAvroParquetScheme(Schema schema, String compression) {
  this(schema);
  if (compression.equalsIgnoreCase("snappy")) {
    codec = CompressionCodecName.SNAPPY;
  } else if (compression.equalsIgnoreCase("gzip")) {
    codec = CompressionCodecName.GZIP;
  }
}
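A hedged usage sketch of the constructor above; the Avro record schema is made up, and only the two-argument constructor shown in the snippet is assumed to exist.

Schema schema = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"Event\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
// Any compression value other than "snappy" or "gzip" silently keeps the scheme's default codec.
HMAvroParquetScheme scheme = new HMAvroParquetScheme(schema, "gzip"); // codec becomes CompressionCodecName.GZIP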
Example 3: run
public int run(String[] args) throws Exception {
  if (args.length < 2) {
    LOG.error("Usage: " + getClass().getName() + " INPUTFILE OUTPUTFILE [compression]");
    return 1;
  }
  String inputFile = args[0];
  String outputFile = args[1];
  String compression = (args.length > 2) ? args[2] : "none";

  // Find a file in case a directory was passed
  Path parquetFilePath = null;
  RemoteIterator<LocatedFileStatus> it = FileSystem.get(getConf()).listFiles(new Path(inputFile), true);
  while (it.hasNext()) {
    FileStatus fs = it.next();
    if (fs.isFile()) {
      parquetFilePath = fs.getPath();
      break;
    }
  }
  if (parquetFilePath == null) {
    LOG.error("No file found for " + inputFile);
    return 1;
  }

  // Reuse the schema of the existing Parquet file for the job output.
  LOG.info("Getting schema from " + parquetFilePath);
  ParquetMetadata readFooter = ParquetFileReader.readFooter(getConf(), parquetFilePath);
  MessageType schema = readFooter.getFileMetaData().getSchema();
  LOG.info(schema);
  GroupWriteSupport.setSchema(schema, getConf());

  Job job = new Job(getConf());
  job.setJarByClass(getClass());
  job.setJobName(getClass().getName());
  job.setMapperClass(ReadRequestMap.class);
  job.setNumReduceTasks(0);
  job.setInputFormatClass(ExampleInputFormat.class);
  job.setOutputFormatClass(ExampleOutputFormat.class);

  // Map the command-line compression argument onto a Parquet codec; GZIP is one option.
  CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED;
  if (compression.equalsIgnoreCase("snappy")) {
    codec = CompressionCodecName.SNAPPY;
  } else if (compression.equalsIgnoreCase("gzip")) {
    codec = CompressionCodecName.GZIP;
  }
  LOG.info("Output compression: " + codec);
  ExampleOutputFormat.setCompression(job, codec);

  FileInputFormat.setInputPaths(job, new Path(inputFile));
  FileOutputFormat.setOutputPath(job, new Path(outputFile));

  job.waitForCompletion(true);
  return 0;
}
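The run method above matches Hadoop's Tool contract (run(String[]) plus getConf()), so it is normally launched through ToolRunner. A hedged driver sketch; ConvertToGzipParquetTool is a stand-in name for whatever class declares run(...) above.

// Hypothetical entry point for the Tool implementation shown above.
public static void main(String[] args) throws Exception {
  int exitCode = ToolRunner.run(new Configuration(), new ConvertToGzipParquetTool(), args);
  System.exit(exitCode);
}

From the command line it could then be invoked as, for example, hadoop jar my-examples.jar ConvertToGzipParquetTool /data/in.parquet /data/out gzip.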