This article collects typical usage examples of the Java method parquet.hadoop.util.ContextUtil.getConfiguration. If you have been wondering what ContextUtil.getConfiguration does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, parquet.hadoop.util.ContextUtil.
Seven code examples of ContextUtil.getConfiguration are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
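Before the examples, a word on why this method exists: between Hadoop 1.x and 2.x, JobContext and TaskAttemptContext changed from classes to interfaces, so code that calls getConfiguration() directly on them can break at runtime when compiled against one version and run on the other. ContextUtil.getConfiguration resolves the call reflectively so the same jar works on both. A minimal sketch of a typical call site (the class and method names here are illustrative, not part of the library):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;

import parquet.hadoop.util.ContextUtil;

public class ConfigLookup {
  // Resolve a job setting through ContextUtil rather than calling
  // jobContext.getConfiguration() directly, so the code stays binary
  // compatible with both Hadoop 1.x and 2.x.
  public static String lookup(JobContext jobContext, String key, String fallback) {
    Configuration conf = ContextUtil.getConfiguration(jobContext);
    return conf.get(key, fallback);
  }
}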
Example 1: getSplits
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

/**
 * {@inheritDoc}
 */
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException {
  Configuration configuration = ContextUtil.getConfiguration(jobContext);
  List<InputSplit> splits = new ArrayList<InputSplit>();
  if (isTaskSideMetaData(configuration)) {
    // Although not required by the API, some clients may depend on always
    // receiving ParquetInputSplit. Translation is required at some point.
    for (InputSplit split : super.getSplits(jobContext)) {
      Preconditions.checkArgument(split instanceof FileSplit,
          "Cannot wrap non-FileSplit: " + split);
      splits.add(ParquetInputSplit.from((FileSplit) split));
    }
    return splits;
  } else {
    splits.addAll(getSplits(configuration, getFooters(jobContext)));
  }
  return splits;
}
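The branch taken above is controlled by a boolean job setting. A minimal sketch of flipping it before job submission; the key "parquet.task.side.metadata" matches ParquetInputFormat.TASK_SIDE_METADATA in parquet-mr, but treat the helper class below as illustrative:

import org.apache.hadoop.mapreduce.Job;

public class SplitModeConfig {
  // When true, getSplits() above only wraps the FileSplits produced by the
  // superclass and defers footer reading to the tasks; when false, footers
  // are read up front on the client to compute the splits.
  public static void useTaskSideMetadata(Job job, boolean enabled) {
    job.getConfiguration().setBoolean("parquet.task.side.metadata", enabled);
  }
}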
Example 2: commitJob
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

@Override
public void commitJob(JobContext jobContext) throws IOException {
  super.commitJob(jobContext);
  Configuration conf = ContextUtil.getConfiguration(jobContext);
  Path outputPath = FileOutputFormat.getOutputPath(new JobConf(conf));
  ParquetOutputCommitter.writeMetaDataFile(conf, outputPath);
}
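Note that this example constructs an old-style JobConf only to reuse FileOutputFormat.getOutputPath from the mapred API. With the new mapreduce API the output path can be read from the JobContext directly. A minimal sketch (the helper class name is illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OutputPathLookup {
  // Equivalent lookup without constructing an intermediate JobConf.
  public static Path outputPath(JobContext jobContext) {
    return FileOutputFormat.getOutputPath(jobContext);
  }
}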
Example 3: setUnboundRecordFilter
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

public static void setUnboundRecordFilter(Job job, Class<? extends UnboundRecordFilter> filterClass) {
  Configuration conf = ContextUtil.getConfiguration(job);
  checkArgument(getFilterPredicate(conf) == null,
      "You cannot provide an UnboundRecordFilter after providing a FilterPredicate");
  conf.set(UNBOUND_RECORD_FILTER, filterClass.getName());
}
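Because the filter class is stored by name in the configuration and instantiated reflectively, it must have a public no-arg constructor. A minimal sketch of a filter that could be passed to setUnboundRecordFilter; the column path and value are illustrative, while ColumnRecordFilter and ColumnPredicates come from the parquet.filter package:

import parquet.column.ColumnReader;
import parquet.filter.ColumnPredicates;
import parquet.filter.ColumnRecordFilter;
import parquet.filter.RecordFilter;
import parquet.filter.UnboundRecordFilter;

public class UserIdFilter implements UnboundRecordFilter {
  // Delegate to a column filter that keeps only records where user.id == "42".
  private final UnboundRecordFilter delegate =
      ColumnRecordFilter.column("user.id", ColumnPredicates.equalTo("42"));

  @Override
  public RecordFilter bind(Iterable<ColumnReader> readers) {
    return delegate.bind(readers);
  }
}

It would then be registered with ParquetInputFormat.setUnboundRecordFilter(job, UserIdFilter.class).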
Example 4: createRecordReader
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

/**
 * {@inheritDoc}
 */
@Override
public RecordReader<Void, T> createRecordReader(
    InputSplit inputSplit,
    TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
  Configuration conf = ContextUtil.getConfiguration(taskAttemptContext);
  ReadSupport<T> readSupport = getReadSupport(conf);
  return new ParquetRecordReader<T>(readSupport, getFilter(conf));
}
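For createRecordReader to find a ReadSupport, one must be registered on the job up front. A minimal sketch using GroupReadSupport, the generic-record ReadSupport that ships with parquet-hadoop (the helper class name is illustrative):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;

import parquet.hadoop.ParquetInputFormat;
import parquet.hadoop.example.GroupReadSupport;

public class ReadSupportSetup {
  public static Job newParquetJob() throws IOException {
    Job job = Job.getInstance();
    job.setInputFormatClass(ParquetInputFormat.class);
    // getReadSupport(conf) in createRecordReader() instantiates this class.
    ParquetInputFormat.setReadSupportClass(job, GroupReadSupport.class);
    return job;
  }
}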
Example 5: commitJob
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

public void commitJob(JobContext jobContext) throws IOException {
  super.commitJob(jobContext);
  Configuration configuration = ContextUtil.getConfiguration(jobContext);
  // outputPath is a field of the enclosing committer class
  writeMetaDataFile(configuration, outputPath);
}
Example 6: getFooters
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

/**
 * @param jobContext the current job context
 * @return the footers for the files
 * @throws IOException
 */
public List<Footer> getFooters(JobContext jobContext) throws IOException {
  List<FileStatus> statuses = listStatus(jobContext);
  if (statuses.isEmpty()) {
    return Collections.emptyList();
  }
  Configuration config = ContextUtil.getConfiguration(jobContext);
  List<Footer> footers = new ArrayList<Footer>(statuses.size());
  Set<FileStatus> missingStatuses = new HashSet<FileStatus>();
  Map<Path, FileStatusWrapper> missingStatusesMap =
      new HashMap<Path, FileStatusWrapper>(missingStatuses.size());
  if (footersCache == null) {
    footersCache =
        new LruCache<FileStatusWrapper, FootersCacheValue>(Math.max(statuses.size(), MIN_FOOTER_CACHE_SIZE));
  }
  for (FileStatus status : statuses) {
    FileStatusWrapper statusWrapper = new FileStatusWrapper(status);
    FootersCacheValue cacheEntry =
        footersCache.getCurrentValue(statusWrapper);
    if (Log.DEBUG) {
      LOG.debug("Cache entry " + (cacheEntry == null ? "not " : "")
          + " found for '" + status.getPath() + "'");
    }
    if (cacheEntry != null) {
      footers.add(cacheEntry.getFooter());
    } else {
      missingStatuses.add(status);
      missingStatusesMap.put(status.getPath(), statusWrapper);
    }
  }
  if (Log.DEBUG) {
    LOG.debug("found " + footers.size() + " footers in cache and adding up "
        + "to " + missingStatuses.size() + " missing footers to the cache");
  }
  if (missingStatuses.isEmpty()) {
    return footers;
  }
  List<Footer> newFooters = getFooters(config, missingStatuses);
  for (Footer newFooter : newFooters) {
    // Use the original file status objects to make sure we store a
    // conservative (older) modification time (i.e. in case the files and
    // footers were modified and it's not clear which version of the footers
    // we have)
    FileStatusWrapper fileStatus = missingStatusesMap.get(newFooter.getFile());
    footersCache.put(fileStatus, new FootersCacheValue(fileStatus, newFooter));
  }
  footers.addAll(newFooters);
  return footers;
}
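The caching above matters when a job touches many files; for a one-off inspection of a single file's metadata, the footer can also be read directly. A minimal sketch (the helper class name is illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import parquet.hadoop.ParquetFileReader;
import parquet.hadoop.metadata.ParquetMetadata;

public class FooterInspector {
  // Read one footer directly, bypassing the LruCache used by getFooters().
  public static ParquetMetadata readOne(Configuration conf, Path file) throws IOException {
    return ParquetFileReader.readFooter(conf, file);
  }
}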
Example 7: getConfiguration
import parquet.hadoop.util.ContextUtil; // import the package/class this method depends on

@Override
public Configuration getConfiguration() {
  return ContextUtil.getConfiguration(context);
}