当前位置: 首页>>代码示例>>Java>>正文


Java Options.getOption方法代码示例

本文整理汇总了Java中org.apache.hadoop.util.Options.getOption方法的典型用法代码示例。如果您正苦于以下问题:Java Options.getOption方法的具体用法?Java Options.getOption怎么用?Java Options.getOption使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.util.Options的用法示例。


在下文中一共展示了Options.getOption方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createWriter

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Creates a Writer whose concrete type matches the requested compression.
 * When no {@code CompressionOption} is present, the configured default
 * compression type is resolved and prepended to the option list so the
 * concrete writer sees it too.
 *
 * @param conf the configuration to use
 * @param opts the options to create the file with
 * @return a new Writer
 * @throws IOException if the underlying writer cannot be created
 */
public static Writer createWriter(Configuration conf, Writer.Option... opts
                                  ) throws IOException {
  Writer.CompressionOption explicit =
    Options.getOption(Writer.CompressionOption.class, opts);
  final CompressionType kind;
  if (explicit == null) {
    // No explicit choice: fall back to the configured default and make it
    // visible to the concrete writer by prepending it to the options.
    kind = getDefaultCompressionType(conf);
    opts = Options.prependOptions(opts, Writer.compression(kind));
  } else {
    kind = explicit.getValue();
  }
  switch (kind) {
    case RECORD:
      return new RecordCompressWriter(conf, opts);
    case BLOCK:
      return new BlockCompressWriter(conf, opts);
    case NONE:
    default:
      // Unknown kinds behave like NONE, matching the original fall-through.
      return new Writer(conf, opts);
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:29,代码来源:SequenceFile.java

示例2: Reader

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Opens a MapFile reader on {@code dir}, honoring an optional
 * {@code ComparatorOption} supplied through {@code opts}.
 */
public Reader(Path dir, Configuration conf,
              SequenceFile.Reader.Option... opts) throws IOException {
  ComparatorOption cmpOpt =
    Options.getOption(ComparatorOption.class, opts);
  WritableComparator cmp = null;
  if (cmpOpt != null) {
    cmp = cmpOpt.getValue();
  }
  INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
  open(dir, cmp, conf, opts);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:10,代码来源:MapFile.java

示例3: Reader

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Creates a reader from a set of options. Exactly one of a file option or
 * an input-stream option must be supplied; a buffer size may only
 * accompany a file option.
 *
 * @param conf the configuration to use
 * @param opts reader options; absent options resolve to null
 * @throws IOException if the file cannot be opened
 * @throws IllegalArgumentException on an inconsistent option combination
 */
public Reader(Configuration conf, Option... opts) throws IOException {
  FileOption fileOption = Options.getOption(FileOption.class, opts);
  InputStreamOption streamOption =
    Options.getOption(InputStreamOption.class, opts);
  StartOption startOption = Options.getOption(StartOption.class, opts);
  LengthOption lengthOption = Options.getOption(LengthOption.class, opts);
  BufferSizeOption bufferOption =
    Options.getOption(BufferSizeOption.class, opts);
  OnlyHeaderOption onlyHeader =
    Options.getOption(OnlyHeaderOption.class, opts);
  // Exactly one source (file XOR stream) must be present.
  if ((fileOption == null) == (streamOption == null)) {
    throw new
      IllegalArgumentException("File or stream option must be specified");
  }
  // Buffer size is a file-only modifier.
  if (fileOption == null && bufferOption != null) {
    throw new IllegalArgumentException("buffer size can only be set when" +
                                       " a file is specified.");
  }
  Path path = null;
  FSDataInputStream stream;
  final long length;
  if (fileOption == null) {
    // Stream source: length defaults to "unbounded".
    length = lengthOption != null ? lengthOption.getValue() : Long.MAX_VALUE;
    stream = streamOption.getValue();
  } else {
    path = fileOption.getValue();
    FileSystem fs = path.getFileSystem(conf);
    int bufferSize = bufferOption != null
      ? bufferOption.getValue() : getBufferSize(conf);
    length = lengthOption != null
      ? lengthOption.getValue()
      : fs.getFileStatus(path).getLen();
    stream = openFile(fs, path, bufferSize, length);
  }
  long start = startOption != null ? startOption.getValue() : 0;
  initialize(path, stream, start, length, conf, onlyHeader != null);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:40,代码来源:SequenceFile.java

示例4: Writer

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Constructs a WAL writer from a set of options. Exactly one of a file
 * option or a stream option must be supplied; block-size, buffer-size and
 * replication modifiers are only valid together with a file option. When
 * append-if-exists is requested for an existing file, the existing header
 * is validated and the writer continues in append mode.
 *
 * @param conf the configuration to use
 * @param opts writer options; absent options resolve to null
 * @throws IOException if the output cannot be created or appended
 * @throws IllegalArgumentException on an inconsistent option combination
 */
Writer(Configuration conf, Option... opts) throws IOException {
  // Each lookup returns null when the corresponding option is absent.
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options: exactly one sink (file XOR stream).
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  // The size/replication modifiers only make sense for a file we create.
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  // ownStream: we created the stream and are responsible for closing it.
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    // Fall back to configured/filesystem defaults for unset modifiers.
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
                     bufferSizeOption.getValue();
    short replication = replicationOption == null ?
                        fs.getDefaultReplication(p) :
                        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
                     blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      // NOTE(review): VERSION[3] appears to be the version byte of the
      // 4-byte header — TODO confirm VERSION layout before relying on it.
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())){
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        // Reuse the existing file's sync marker so records stay consistent.
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      // Create (or overwrite) a fresh file with the resolved parameters.
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:60,代码来源:WALFile.java

示例5: Reader

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Creates a reader from a set of options. A source must be given as
 * either a file or an input stream, but never both; a buffer size is
 * accepted only with a file source.
 *
 * @param conf the configuration to use
 * @param opts reader options
 * @throws IOException if the source cannot be opened
 * @throws IllegalArgumentException on an inconsistent option combination
 */
public Reader(Configuration conf, Option... opts) throws IOException {
  // Absent options resolve to null.
  FileOption srcFile = Options.getOption(FileOption.class, opts);
  InputStreamOption srcStream =
      Options.getOption(InputStreamOption.class, opts);
  StartOption start = Options.getOption(StartOption.class, opts);
  LengthOption length = Options.getOption(LengthOption.class, opts);
  BufferSizeOption buffer = Options.getOption(BufferSizeOption.class, opts);
  OnlyHeaderOption headerOnly =
      Options.getOption(OnlyHeaderOption.class, opts);
  // Require exactly one source.
  if ((srcFile == null) == (srcStream == null)) {
    throw new
        IllegalArgumentException("File or stream option must be specified");
  }
  if (srcFile == null && buffer != null) {
    throw new IllegalArgumentException("buffer size can only be set when" +
                                       " a file is specified.");
  }
  Path filename = null;
  FSDataInputStream file;
  final long len;
  if (srcFile != null) {
    filename = srcFile.getValue();
    FileSystem fs = filename.getFileSystem(conf);
    int bufSize;
    if (buffer == null) {
      bufSize = getBufferSize(conf);
    } else {
      bufSize = buffer.getValue();
    }
    // Length defaults to the file's actual size.
    len = (length == null)
          ? fs.getFileStatus(filename).getLen()
          : length.getValue();
    file = openFile(fs, filename, bufSize, len);
  } else {
    len = (length == null) ? Long.MAX_VALUE : length.getValue();
    file = srcStream.getValue();
  }
  long startPos = (start == null) ? 0 : start.getValue();
  initialize(filename, file, startPos, len, conf, headerOnly != null);
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:40,代码来源:WALFile.java

示例6: Reader

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Opens a reader on {@code dir}; a comparator may be supplied through a
 * {@code ComparatorOption} in {@code opts}.
 */
public Reader(Path dir, Configuration conf,
              SequenceFile.Reader.Option... opts) throws IOException {
  ComparatorOption option =
    Options.getOption(ComparatorOption.class, opts);
  WritableComparator comparator =
    (option != null) ? option.getValue() : null;
  INDEX_SKIP = conf.getInt(
      IO_MAP_INDEX_SKIP_KEY, IO_MAP_INDEX_SKIP_DEFAULT);
  open(dir, comparator, conf, opts);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:11,代码来源:MapFile.java

示例7: Writer

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Creates a MapFile writer under {@code dirName}. Exactly one of a key
 * class option or a comparator option must be supplied; the missing one
 * is derived from the other.
 */
@SuppressWarnings("unchecked")
public Writer(Configuration conf, 
              Path dirName,
              SequenceFile.Writer.Option... opts
              ) throws IOException {
  KeyClassOption keyOpt =
    Options.getOption(KeyClassOption.class, opts);
  ComparatorOption cmpOpt =
    Options.getOption(ComparatorOption.class, opts);
  // Exactly one of the two must be present.
  if ((keyOpt == null) == (cmpOpt == null)) {
    throw new IllegalArgumentException("key class or comparator option "
                                       + "must be set");
  }
  this.indexInterval = conf.getInt(INDEX_INTERVAL, this.indexInterval);

  Class<? extends WritableComparable> keyClass;
  if (keyOpt != null) {
    keyClass =
      (Class<? extends WritableComparable>) keyOpt.getValue();
    this.comparator = WritableComparator.get(keyClass, conf);
  } else {
    this.comparator = cmpOpt.getValue();
    keyClass = comparator.getKeyClass();
  }
  this.lastKey = comparator.newKey();

  FileSystem fs = dirName.getFileSystem(conf);
  if (!fs.mkdirs(dirName)) {
    throw new IOException("Mkdirs failed to create directory " + dirName);
  }

  Path dataFile = new Path(dirName, DATA_FILE_NAME);
  Path indexFile = new Path(dirName, INDEX_FILE_NAME);

  // Data file inherits the caller's options plus its name and key class.
  this.data = SequenceFile.createWriter(conf,
      Options.prependOptions(opts,
                             SequenceFile.Writer.file(dataFile),
                             SequenceFile.Writer.keyClass(keyClass)));

  // Index maps keys to offsets; force block compression for it.
  this.index = SequenceFile.createWriter(conf,
      Options.prependOptions(opts, SequenceFile.Writer.file(indexFile),
          SequenceFile.Writer.keyClass(keyClass),
          SequenceFile.Writer.valueClass(LongWritable.class),
          SequenceFile.Writer.compression(CompressionType.BLOCK)));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:47,代码来源:MapFile.java

示例8: Writer

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Constructs an uncompressed writer from a set of options. Exactly one of
 * a file option or a stream option must be supplied; block-size,
 * buffer-size, replication and progress modifiers are only valid together
 * with a file option.
 *
 * @param conf the configuration to use
 * @param opts the options used when creating the writer
 * @throws IOException if the output cannot be created
 * @throws IllegalArgumentException on an inconsistent option combination
 */
Writer(Configuration conf, 
       Option... opts) throws IOException {
  // Each lookup returns null when the corresponding option is absent.
  BlockSizeOption blockSizeOption = 
    Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption = 
    Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption = 
    Options.getOption(ReplicationOption.class, opts);
  ProgressableOption progressOption = 
    Options.getOption(ProgressableOption.class, opts);
  FileOption fileOption = Options.getOption(FileOption.class, opts);
  FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);
  KeyClassOption keyClassOption = 
    Options.getOption(KeyClassOption.class, opts);
  ValueClassOption valueClassOption = 
    Options.getOption(ValueClassOption.class, opts);
  MetadataOption metadataOption = 
    Options.getOption(MetadataOption.class, opts);
  CompressionOption compressionTypeOption =
    Options.getOption(CompressionOption.class, opts);
  // check consistency of options: exactly one sink (file XOR stream).
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  // File-creation modifiers make no sense with a caller-supplied stream.
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null ||
                             progressOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  // ownStream: the stream was created here and will be closed by us.
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    if (fsOption != null) {
      fs = fsOption.getValue();
    } else {
      fs = p.getFileSystem(conf);
    }
    // Fall back to configured/filesystem defaults for unset modifiers.
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
      bufferSizeOption.getValue();
    short replication = replicationOption == null ? 
      fs.getDefaultReplication(p) :
      (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
      blockSizeOption.getValue();
    Progressable progress = progressOption == null ? null :
      progressOption.getValue();
    out = fs.create(p, true, bufferSize, replication, blockSize, progress);
  } else {
    out = streamOption.getValue();
  }
  Class<?> keyClass = keyClassOption == null ?
      Object.class : keyClassOption.getValue();
  Class<?> valueClass = valueClassOption == null ?
      Object.class : valueClassOption.getValue();
  Metadata metadata = metadataOption == null ?
      new Metadata() : metadataOption.getValue();
  // NOTE(review): compressionTypeOption is not null-checked — constructing
  // a Writer without a CompressionOption would NPE here. Presumably
  // callers (e.g. createWriter) always prepend one; verify before direct use.
  this.compress = compressionTypeOption.getValue();
  final CompressionCodec codec = compressionTypeOption.getCodec();
  // GzipCodec needs native zlib; fail fast rather than produce bad output.
  if (codec != null &&
      (codec instanceof GzipCodec) &&
      !NativeCodeLoader.isNativeCodeLoaded() &&
      !ZlibFactory.isNativeZlibLoaded(conf)) {
    throw new IllegalArgumentException("SequenceFile doesn't work with " +
                                       "GzipCodec without native-hadoop " +
                                       "code!");
  }
  init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:81,代码来源:SequenceFile.java

示例9: Writer

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Constructs an uncompressed writer from a set of options. The caller
 * must provide exactly one output sink — a file option or a stream
 * option. The block-size, buffer-size, replication and progress options
 * apply only when a file is being created.
 *
 * @param conf the configuration to use
 * @param opts the options used when creating the writer
 * @throws IOException if the output cannot be created
 * @throws IllegalArgumentException on an inconsistent option combination
 */
Writer(Configuration conf, 
       Option... opts) throws IOException {
  // Every lookup below yields null when that option was not passed.
  BlockSizeOption blockSizeOption = 
    Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption = 
    Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption = 
    Options.getOption(ReplicationOption.class, opts);
  ProgressableOption progressOption = 
    Options.getOption(ProgressableOption.class, opts);
  FileOption fileOption = Options.getOption(FileOption.class, opts);
  FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);
  KeyClassOption keyClassOption = 
    Options.getOption(KeyClassOption.class, opts);
  ValueClassOption valueClassOption = 
    Options.getOption(ValueClassOption.class, opts);
  MetadataOption metadataOption = 
    Options.getOption(MetadataOption.class, opts);
  CompressionOption compressionTypeOption =
    Options.getOption(CompressionOption.class, opts);
  // check consistency of options: file and stream are mutually exclusive.
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  // Creation-time modifiers are rejected when writing to a given stream.
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null ||
                             progressOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  // When we create the file ourselves, we also own (and later close) it.
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    if (fsOption != null) {
      fs = fsOption.getValue();
    } else {
      fs = p.getFileSystem(conf);
    }
    // Unset modifiers fall back to configuration / filesystem defaults.
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
      bufferSizeOption.getValue();
    short replication = replicationOption == null ? 
      fs.getDefaultReplication(p) :
      (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
      blockSizeOption.getValue();
    Progressable progress = progressOption == null ? null :
      progressOption.getValue();
    out = fs.create(p, true, bufferSize, replication, blockSize, progress);
  } else {
    out = streamOption.getValue();
  }
  Class<?> keyClass = keyClassOption == null ?
      Object.class : keyClassOption.getValue();
  Class<?> valueClass = valueClassOption == null ?
      Object.class : valueClassOption.getValue();
  Metadata metadata = metadataOption == null ?
      new Metadata() : metadataOption.getValue();
  // NOTE(review): no null check on compressionTypeOption; a caller that
  // omits the CompressionOption hits an NPE on the next line. It looks
  // like factory methods always supply one — confirm before direct use.
  this.compress = compressionTypeOption.getValue();
  final CompressionCodec codec = compressionTypeOption.getCodec();
  // Gzip requires native zlib support; reject early if it is missing.
  if (codec != null &&
      (codec instanceof GzipCodec) &&
      !NativeCodeLoader.isNativeCodeLoaded() &&
      !ZlibFactory.isNativeZlibLoaded(conf)) {
    throw new IllegalArgumentException("SequenceFile doesn't work with " +
                                       "GzipCodec without native-hadoop " +
                                       "code!");
  }
  init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
}
 
开发者ID:gemxd,项目名称:gemfirexd-oss,代码行数:81,代码来源:SequenceFile.java

示例10: Writer

import org.apache.hadoop.util.Options; //导入方法依赖的package包/类
/**
 * Creates a MapFile writer under {@code dirName}. Exactly one of a key
 * class option or a comparator option must be supplied; the other is
 * derived from it. Two SequenceFile writers are opened: one for the data
 * file and one for the block-compressed index.
 *
 * @param conf the configuration to use
 * @param dirName directory that will hold the data and index files
 * @param opts writer options, forwarded to both underlying writers
 * @throws IOException if the directory or files cannot be created
 * @throws IllegalArgumentException if neither or both of key class and
 *         comparator options are given
 */
@SuppressWarnings("unchecked")
public Writer(Configuration conf, 
              Path dirName,
              SequenceFile.Writer.Option... opts
              ) throws IOException {
  KeyClassOption keyClassOption = 
    Options.getOption(KeyClassOption.class, opts);
  ComparatorOption comparatorOption =
    Options.getOption(ComparatorOption.class, opts);
  // Exactly one of the two must be present.
  if ((keyClassOption == null) == (comparatorOption == null)) {
    throw new IllegalArgumentException("key class or comparator option "
                                       + "must be set");
  }
  this.indexInterval = conf.getInt(INDEX_INTERVAL, this.indexInterval);

  Class<? extends WritableComparable> keyClass;
  if (keyClassOption == null) {
    // Comparator given: the key class comes from the comparator.
    this.comparator = comparatorOption.getValue();
    keyClass = comparator.getKeyClass();
  } else {
    // Key class given: derive a comparator for it.
    // NOTE(review): this variant calls WritableComparator.get(keyClass)
    // without the Configuration overload used elsewhere — presumably an
    // older Hadoop API; confirm against the target version.
    keyClass= 
      (Class<? extends WritableComparable>) keyClassOption.getValue();
    this.comparator = WritableComparator.get(keyClass);
  }
  this.lastKey = comparator.newKey();
  FileSystem fs = dirName.getFileSystem(conf);

  if (!fs.mkdirs(dirName)) {
    throw new IOException("Mkdirs failed to create directory " + dirName);
  }
  Path dataFile = new Path(dirName, DATA_FILE_NAME);
  Path indexFile = new Path(dirName, INDEX_FILE_NAME);

  // Data writer: caller options plus the data file name and key class.
  SequenceFile.Writer.Option[] dataOptions =
    Options.prependOptions(opts, 
                           SequenceFile.Writer.file(dataFile),
                           SequenceFile.Writer.keyClass(keyClass));
  this.data = SequenceFile.createWriter(conf, dataOptions);

  // Index writer: maps keys to LongWritable offsets, block-compressed.
  SequenceFile.Writer.Option[] indexOptions =
    Options.prependOptions(opts, SequenceFile.Writer.file(indexFile),
        SequenceFile.Writer.keyClass(keyClass),
        SequenceFile.Writer.valueClass(LongWritable.class),
        SequenceFile.Writer.compression(CompressionType.BLOCK));
  this.index = SequenceFile.createWriter(conf, indexOptions);      
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:47,代码来源:MapFile.java


注:本文中的org.apache.hadoop.util.Options.getOption方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。