This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.configureGlobally. If you are unsure what SchemaMetrics.configureGlobally does or how to use it, the curated code examples below should help. For broader context, see the enclosing class, org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
Below are 14 code examples of SchemaMetrics.configureGlobally, sorted by popularity by default.
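All 14 examples share one pattern: build a Hadoop Configuration and call SchemaMetrics.configureGlobally(conf) once, before any HFile or StoreFile I/O. Here is a minimal, self-contained sketch of that pattern (it assumes an HBase 0.94-era classpath, where this class still exists; it was removed in later versions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;

public class ConfigureGloballySketch {
  public static void main(String[] args) {
    // Load hbase-default.xml / hbase-site.xml into a Configuration.
    Configuration conf = HBaseConfiguration.create();
    // Initialize the global schema-metrics state once, up front;
    // readers and writers created later rely on it.
    SchemaMetrics.configureGlobally(conf);
  }
}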
Example 1: HFileWriterV2
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
Example 2: StoreFile
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *          configuration. This may or may not be the same as the Bloom filter type actually
 *          present in the HFile, because column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE : dataBlockEncoder;
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  // Cache the modification timestamp of this store file.
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
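For orientation, here is a hedged sketch of how a StoreFile constructed this way is typically consumed. It assumes the 0.94-era StoreFile.Reader API (createReader()/closeReader(boolean)); the path is hypothetical:

// Hedged usage sketch; path and API assumptions noted above.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
Path p = new Path("/hbase/mytable/1234/cf/storefile1"); // hypothetical store file path
StoreFile sf = new StoreFile(fs, p, conf, new CacheConfig(conf),
    StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
StoreFile.Reader reader = sf.createReader(); // opens the HFile and loads its indices
// ... scan via reader ...
sf.closeReader(true); // evict cached blocks on close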
Example 3: HFileWriterV1
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Example 4: getWriterFactory
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Returns the factory to be used to create {@link HFile} writers
 */
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
  case 1:
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  case 2:
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  default:
    throw new IllegalArgumentException("Cannot create writer for HFile format version "
        + version);
  }
}
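A minimal usage sketch for such a factory follows. Hedged: it assumes the 0.94-era builder methods withPath(...) and create() on HFile.WriterFactory, and the output path is hypothetical:

// The factory call runs SchemaMetrics.configureGlobally(conf) internally,
// then dispatches on the configured HFile format version.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
Path out = new Path("/tmp/example.hfile"); // hypothetical output path
HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
    .withPath(fs, out)
    .create();
writer.close();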
Example 5: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
@Before
public void setUp() throws IOException {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  SchemaMetrics.configureGlobally(conf);
}
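The snapshot taken in setUp usually has a companion check in tearDown. A hedged sketch (it assumes the 0.94-era SchemaMetrics.validateMetricChanges helper, which compares current metrics against a saved snapshot):

@After
public void tearDown() throws IOException {
  // Assert that only expected schema metrics changed since setUp.
  SchemaMetrics.validateMetricChanges(startingMetrics);
}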
Example 6: getWriterFactory
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Returns the factory to be used to create {@link HFile} writers
 */
public static final WriterFactory getWriterFactory(Configuration conf,
    CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
  case 1:
    return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
  case 2:
    return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
  default:
    throw new IllegalArgumentException("Cannot create writer for HFile " +
        "format version " + version);
  }
}
Example 7: getHoplog
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
@Override
protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
  SchemaMetrics.configureGlobally(fs.getConf());
  return HFileSortedOplog.getHoplogForLoner(fs, path);
}
Example 8: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
@Before
public void setUp() {
  SchemaMetrics.configureGlobally(TEST_UTIL.getConfiguration());
}
Example 9: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Preparation before JUnit test.
 */
@Before
public void setUp() {
  conf = TEST_UTIL.getConfiguration();
  SchemaMetrics.configureGlobally(conf);
}
Example 10: StoreFile
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of RAM depending on the underlying files (10-20MB?).
 *
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
    final Path p,
    final Configuration conf,
    final CacheConfig cacheConf,
    final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  Path tmpPath = getIndexPathFromPath(path);
  if (fs.exists(tmpPath)) {
    this.indexPath = tmpPath;
    this.hasIndex = true;
  }
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;
  // TODO: add link index file support
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    // Check for a secondary-index file alongside the referred-to file.
    tmpPath = getIndexPathFromPath(referencePath);
    if (fs.exists(tmpPath)) {
      this.indexReferencePath = tmpPath;
      this.hasIndex = true;
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
        " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  // Cache the modification timestamp of this store file.
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
}
Example 11: bulkLoadNewMacAddresses
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
private static void bulkLoadNewMacAddresses(
    Configuration conf, String inputPath, String outputPath, String tblName)
    throws Exception {
  // Pass parameters to MapReduce.
  conf.set("hbase.table.name", tblName);
  conf.set("macs", macAddressesLine);
  // Workaround: initialize schema metrics before writing HFiles.
  SchemaMetrics.configureGlobally(conf);
  // Load hbase-site.xml.
  HBaseConfiguration.addHbaseResources(conf);
  // Create the job.
  Job job = new Job(conf, "Load macAddresses in bloomfilters table");
  job.setJarByClass(MapperBulkLoadMacAddresses.class);
  job.setMapperClass(MapperBulkLoadMacAddresses.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(KeyValue.class);
  job.setInputFormatClass(TextInputFormat.class);
  // Get the table.
  HTable hTable = new HTable(conf, tblName);
  // Auto-configure partitioner and reducer.
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
  // Set the input and output paths.
  FileInputFormat.addInputPath(job, new Path(inputPath));
  FileOutputFormat.setOutputPath(job, new Path(outputPath));
  // Wait for HFile creation.
  job.waitForCompletion(true);
  // Load the generated HFiles into the table.
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  loader.doBulkLoad(new Path(outputPath), hTable);
}
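A hedged sketch of a driver calling this helper (the argument order is hypothetical, and it assumes the static macAddressesLine field has already been populated elsewhere in the class):

public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // args: <inputPath> <outputPath> <tableName> (hypothetical order)
  bulkLoadNewMacAddresses(conf, args[0], args[1], args[2]);
}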
Example 12: bulkLoadVectormap
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Load the radiomap into HBase.
 *
 * @param conf job configuration
 * @param inputPath path of the input radiomap file
 * @param outputPath output path for the generated HFiles
 * @param tblName target HBase table name
 * @throws Exception
 */
private static void bulkLoadVectormap(
    Configuration conf, String inputPath, String outputPath, String tblName)
    throws Exception {
  // Pass parameters to MapReduce.
  conf.set("hbase.table.name", tblName);
  conf.set("macs", macAddressesLine);
  // Workaround: initialize schema metrics before writing HFiles.
  SchemaMetrics.configureGlobally(conf);
  // Load hbase-site.xml.
  HBaseConfiguration.addHbaseResources(conf);
  // Create the job.
  Job job = new Job(conf, "Load radiomap in HBase");
  job.setJarByClass(MapperBulkLoadRadiomap.class);
  job.setMapperClass(MapperBulkLoadRadiomap.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(KeyValue.class);
  job.setInputFormatClass(TextInputFormat.class);
  // Get the table.
  HTable hTable = new HTable(conf, tblName);
  // Auto-configure partitioner and reducer.
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
  // Set the input and output paths.
  FileInputFormat.addInputPath(job, new Path(inputPath));
  FileOutputFormat.setOutputPath(job, new Path(outputPath));
  // Wait for HFile creation.
  job.waitForCompletion(true);
  // Load the generated HFiles into the table.
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  loader.doBulkLoad(new Path(outputPath), hTable);
}
Example 13: StoreFile
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of RAM depending on the underlying files (10-20MB?).
 *
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
    final Path p,
    final Configuration conf,
    final CacheConfig cacheConf,
    final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
        " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  // Cache the modification timestamp of this store file.
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
}
Example 14: StoreFile
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class the method depends on
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of RAM depending on the underlying files (10-20MB?).
 *
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs,
    final Path p,
    final Configuration conf,
    final CacheConfig cacheConf,
    final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder)
    throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.conf = conf;
  // Secondary-index support flag from configuration.
  useIndex = conf.getBoolean("hbase.use.secondary.index", false);
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
          : dataBlockEncoder;
  if (HFileLink.isHFileLink(p)) {
    this.link = new HFileLink(conf, p);
    LOG.debug("Store file " + p + " is a link");
  } else if (isReference(p)) {
    this.reference = Reference.read(fs, p);
    this.referencePath = getReferredToFile(this.path);
    if (HFileLink.isHFileLink(this.referencePath)) {
      this.link = new HFileLink(conf, this.referencePath);
    }
    LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
        " reference to " + this.referencePath);
  } else if (!isHFile(p)) {
    throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
  }
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  // Cache the modification timestamp of this store file.
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
}