当前位置: 首页>>代码示例>>Java>>正文


Java SchemaMetrics.configureGlobally方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.configureGlobally方法的典型用法代码示例。如果您正苦于以下问题:Java SchemaMetrics.configureGlobally方法的具体用法?Java SchemaMetrics.configureGlobally怎么用?Java SchemaMetrics.configureGlobally使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics的用法示例。


在下文中一共展示了SchemaMetrics.configureGlobally方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: HFileWriterV2

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor that takes a path, creates and closes the output stream.
 *
 * @param ostream stream to write to; when {@code null} a new output stream is
 *          created at {@code path}.
 * @param includeMVCCReadpoint whether memstore timestamps (MVCC read points)
 *          are written into the file.
 * @throws IOException if the output stream cannot be created.
 */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  // Configure global schema metrics from this writer's configuration.
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  // NOTE(review): when HBase checksum verification is off, the writer drops to
  // minor version 0 — presumably the pre-checksum on-disk format; confirm.
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  // Must run after all fields above are set.
  finishInit(conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:19,代码来源:HFileWriterV2.java

示例2: StoreFile

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param blockcache <code>true</code> if the block cache is enabled.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *          configuration. This may or may not be the same as the Bloom filter type actually
 *          present in the HFile, because column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE : dataBlockEncoder;
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " + "cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:42,代码来源:StoreFile.java

示例3: HFileWriterV1

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor that takes a path, creates and closes the output stream.
 *
 * @param ostream stream to write to; when {@code null} a new output stream is
 *          created at {@code path}.
 * @throws IOException if the output stream cannot be created.
 */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  // Configure global schema metrics from this writer's configuration.
  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:11,代码来源:HFileWriterV1.java

示例4: getWriterFactory

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Returns the factory to be used to create {@link HFile} writers, selected by
 * the HFile format version configured in {@code conf}.
 */
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
  // Configure global schema metrics before any writer is handed out.
  SchemaMetrics.configureGlobally(conf);
  final int version = getFormatVersion(conf);
  if (version == 1) {
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  }
  if (version == 2) {
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  }
  throw new IllegalArgumentException(
      "Cannot create writer for HFile format version " + version);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:17,代码来源:HFile.java

示例5: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
@Before
public void setUp() throws IOException {
  // Snapshot the metrics before the test runs so deltas can be checked later;
  // taken before configureGlobally below in case that call alters them.
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  // Configure global schema metrics from the shared test configuration.
  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:8,代码来源:TestHFileReaderV1.java

示例6: getWriterFactory

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Returns the factory to be used to create {@link HFile} writers, selected by
 * the HFile format version configured in {@code conf}.
 */
public static final WriterFactory getWriterFactory(Configuration conf,
    CacheConfig cacheConf) {
  // Configure global schema metrics before any writer is handed out.
  SchemaMetrics.configureGlobally(conf);
  final int version = getFormatVersion(conf);
  if (version == 1) {
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  }
  if (version == 2) {
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  }
  throw new IllegalArgumentException(
      "Cannot create writer for HFile format version " + version);
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:18,代码来源:HFile.java

示例7: getHoplog

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
@Override
protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
  // Configure global HBase schema metrics from the file system's conf before
  // opening the hoplog. NOTE(review): presumably required so the underlying
  // HFile reader does not hit unconfigured metrics — confirm.
  SchemaMetrics.configureGlobally(fs.getConf());
  return HFileSortedOplog.getHoplogForLoner(fs, path); 
}
 
开发者ID:gemxd,项目名称:gemfirexd-oss,代码行数:6,代码来源:RWSplitIterator.java

示例8: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
@Before
public void setUp() {
  // Configure global schema metrics from the shared test configuration.
  SchemaMetrics.configureGlobally(TEST_UTIL.getConfiguration());
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:5,代码来源:TestMultiColumnScanner.java

示例9: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Preparation before JUnit test: caches the shared test configuration and
 * configures global schema metrics from it.
 */
@Before
public void setUp() {
  conf = TEST_UTIL.getConfiguration();
  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:9,代码来源:TestHFileDataBlockEncoder.java

示例10: StoreFile

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param p  The path of the file.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
          final Path p,
          final Configuration conf,
          final CacheConfig cacheConf,
          final BloomType cfBloomType,
          final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  
  // Look for an index companion file next to this store file; when present,
  // record it and flag this store file as indexed.
  Path tmpPath=getIndexPathFromPath(path);
  if(fs.exists(tmpPath)){
    this.indexPath=tmpPath;
    this.hasIndex=true;
  }
  
  this.cacheConf = cacheConf;
  // Default to the no-op encoder when none is supplied.
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;

  //TODO add link index file support
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    // Also check for an index companion of the referred-to file.
    tmpPath=getIndexPathFromPath(referencePath);
    if(fs.exists(tmpPath)){
      this.indexReferencePath=tmpPath;
      this.hasIndex=true;
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
      " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }

  // Honour the requested Bloom type only when general Bloom filters are
  // enabled in the configuration; otherwise force NONE.
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file; 0 when unknown
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }

  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:79,代码来源:StoreFile.java

示例11: bulkLoadNewMacAddresses

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Bulk-loads the MAC-address input file into the given HBase table: runs a
 * MapReduce job that writes HFiles to {@code outputPath}, then moves those
 * HFiles into the table's regions via {@link LoadIncrementalHFiles}.
 *
 * @param conf job/HBase configuration (mutated: table name and MAC list set)
 * @param inputPath text input containing the MAC addresses to load
 * @param outputPath directory where the job writes the intermediate HFiles
 * @param tblName target HBase table name
 * @throws Exception if job setup fails, the job does not succeed, or the
 *           bulk load fails
 */
private static void bulkLoadNewMacAddresses(
        Configuration conf, String inputPath, String outputPath, String tblName)
        throws Exception {

    // Pass parameters to the MapReduce job via the configuration
    conf.set("hbase.table.name", tblName);
    conf.set("macs", macAddressesLine);

    // Workaround: configure global schema metrics so the HFile writers used
    // by HFileOutputFormat do not trip over unconfigured metrics
    SchemaMetrics.configureGlobally(conf);

    // Load hbase-site.xml
    HBaseConfiguration.addHbaseResources(conf);

    // Create the job
    Job job = new Job(conf, "Load macAddresses in bloomfilters table");

    job.setJarByClass(MapperBulkLoadMacAddresses.class);
    job.setMapperClass(MapperBulkLoadMacAddresses.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);

    job.setInputFormatClass(TextInputFormat.class);

    // Get the table; closed in the finally block so the connection is not
    // leaked even when the job or the bulk load fails
    HTable hTable = new HTable(conf, tblName);
    try {
        // Auto configure partitioner and reducer
        HFileOutputFormat.configureIncrementalLoad(job, hTable);

        // Save output path and input path
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Wait for HFile creation; fail fast on job failure (previously the
        // return value was ignored and a failed job still triggered the load)
        if (!job.waitForCompletion(true)) {
            throw new IllegalStateException(
                    "MapReduce job failed; aborting bulk load into " + tblName);
        }

        // Load generated HFiles into table
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(new Path(outputPath), hTable);
    } finally {
        hTable.close();
    }
}
 
开发者ID:dmsl,项目名称:tvm,代码行数:42,代码来源:RadiomapLoader.java

示例12: bulkLoadVectormap

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Loads the radiomap into HBase: runs a MapReduce job that writes HFiles to
 * {@code outputPath}, then moves those HFiles into the table's regions via
 * {@link LoadIncrementalHFiles}.
 *
 * @param conf job/HBase configuration (mutated: table name and MAC list set)
 * @param inputPath text input containing the radiomap to load
 * @param outputPath directory where the job writes the intermediate HFiles
 * @param tblName target HBase table name
 * @throws Exception if job setup fails, the job does not succeed, or the
 *           bulk load fails
 */
private static void bulkLoadVectormap(
        Configuration conf, String inputPath, String outputPath, String tblName)
        throws Exception {

    // Pass parameters to the MapReduce job via the configuration
    conf.set("hbase.table.name", tblName);
    conf.set("macs", macAddressesLine);

    // Workaround: configure global schema metrics so the HFile writers used
    // by HFileOutputFormat do not trip over unconfigured metrics
    SchemaMetrics.configureGlobally(conf);

    // Load hbase-site.xml
    HBaseConfiguration.addHbaseResources(conf);

    // Create the job
    Job job = new Job(conf, "Load radiomap in HBase");

    job.setJarByClass(MapperBulkLoadRadiomap.class);
    job.setMapperClass(MapperBulkLoadRadiomap.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);

    job.setInputFormatClass(TextInputFormat.class);

    // Get the table; closed in the finally block so the connection is not
    // leaked even when the job or the bulk load fails
    HTable hTable = new HTable(conf, tblName);
    try {
        // Auto configure partitioner and reducer
        HFileOutputFormat.configureIncrementalLoad(job, hTable);

        // Save output path and input path
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Wait for HFile creation; fail fast on job failure (previously the
        // return value was ignored and a failed job still triggered the load)
        if (!job.waitForCompletion(true)) {
            throw new IllegalStateException(
                    "MapReduce job failed; aborting bulk load into " + tblName);
        }

        // Load generated HFiles into table
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        loader.doBulkLoad(new Path(outputPath), hTable);
    } finally {
        hTable.close();
    }
}
 
开发者ID:dmsl,项目名称:tvm,代码行数:48,代码来源:RadiomapLoader.java

示例13: StoreFile

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param p  The path of the file.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
          final Path p,
          final Configuration conf,
          final CacheConfig cacheConf,
          final BloomType cfBloomType,
          final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  // Default to the no-op encoder when none is supplied.
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;

  // Classify the path: an HFile link, a reference (possibly itself pointing
  // at a link), or a plain HFile; anything else is rejected.
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
      " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }

  // Honour the requested Bloom type only when general Bloom filters are
  // enabled in the configuration; otherwise force NONE.
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file; 0 when unknown
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }

  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:65,代码来源:StoreFile.java

示例14: StoreFile

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param p  The path of the file.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
          final Path p,
          final Configuration conf,
          final CacheConfig cacheConf,
          final BloomType cfBloomType,
          final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.conf = conf;
  // Secondary-index support flag, read from configuration (defaults to off).
  useIndex = conf.getBoolean("hbase.use.secondary.index", false);
  // Default to the no-op encoder when none is supplied.
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;

  // Classify the path: an HFile link, a reference (possibly itself pointing
  // at a link), or a plain HFile; anything else is rejected.
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
      " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }

  // Honour the requested Bloom type only when general Bloom filters are
  // enabled in the configuration; otherwise force NONE.
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file; 0 when unknown
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }

  SchemaMetrics.configureGlobally(conf);
}
 
开发者ID:Huawei-Hadoop,项目名称:hindex,代码行数:67,代码来源:StoreFile.java


注:本文中的org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.configureGlobally方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。