This article collects typical usage examples of the Java method org.apache.hadoop.hbase.CompatibilitySingletonFactory.getInstance. If you have been wondering what CompatibilitySingletonFactory.getInstance does and how to use it, the curated examples below should help. You can also read up on the enclosing class, org.apache.hadoop.hbase.CompatibilitySingletonFactory, for further context.
The following presents 15 code examples of CompatibilitySingletonFactory.getInstance, ordered by popularity.
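Before the examples, here is a minimal sketch of the behavior they all rely on: getInstance resolves a compatibility interface to its single implementation (discovered via ServiceLoader) and caches it, so repeated calls return the same object. This sketch is illustrative and not taken from the HBase sources; MetricsWALSource is borrowed from the examples below.
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
public class GetInstanceSketch {
  public static void main(String[] args) {
    // First lookup loads and caches the singleton implementation.
    MetricsWALSource first = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    // Subsequent lookups return the exact same instance.
    MetricsWALSource second = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    System.out.println("same instance: " + (first == second)); // expected: true
  }
}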
Example 1: testCompareToHashCodeEquals
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testCompareToHashCodeEquals() throws Exception {
  MetricsRegionServerSourceFactory fact =
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
  MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
  MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
  MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
  assertEquals(0, one.compareTo(oneClone));
  assertEquals(one.hashCode(), oneClone.hashCode());
  assertNotEquals(one, two);
  assertTrue(one.compareTo(two) != 0);
  assertTrue(two.compareTo(one) != 0);
  assertTrue(two.compareTo(one) != one.compareTo(two));
  assertTrue(two.compareTo(two) == 0);
}
Example 2: MetricsRegionServerWrapperImpl
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
public MetricsRegionServerWrapperImpl(final HRegionServer regionServer) {
  this.regionServer = regionServer;
  initBlockCache();
  initMobFileCache();
  this.period = regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD,
      HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD);
  this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
  this.runnable = new RegionServerMetricsWrapperRunnable();
  this.executor.scheduleWithFixedDelay(this.runnable, this.period, this.period,
      TimeUnit.MILLISECONDS);
  this.metricsWALSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  try {
    this.dfsHedgedReadMetrics = FSUtils.getDFSHedgedReadMetrics(regionServer.getConfiguration());
  } catch (IOException e) {
    LOG.warn("Failed to get hedged metrics", e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Computing regionserver metrics every " + this.period + " milliseconds");
  }
}
Example 3: testGetInstance
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testGetInstance() throws Exception {
  MetricsWALSource walSource =
      CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  assertTrue(walSource instanceof MetricsWALSourceImpl);
  assertSame(walSource,
      CompatibilitySingletonFactory.getInstance(MetricsWALSource.class));
}
Example 4: createTestTaskAttemptContext
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
private TaskAttemptContext createTestTaskAttemptContext(final Job job)
    throws Exception {
  HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class);
  TaskAttemptContext context = hadoop.createTestTaskAttemptContext(
      job, "attempt_201402131733_0001_m_000000_0");
  return context;
}
Example 5: testBatchPut_whileNoRowLocksHeld
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testBatchPut_whileNoRowLocksHeld() throws IOException {
  byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
  byte[] qual = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("val");
  this.region = initHRegion(Bytes.toBytes(getName()), getName(), CONF, cf);
  MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  try {
    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    LOG.info("First a batch put with all valid puts");
    final Put[] puts = new Put[10];
    for (int i = 0; i < 10; i++) {
      puts[i] = new Put(Bytes.toBytes("row_" + i));
      puts[i].add(cf, qual, val);
    }
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
    LOG.info("Next a batch put with one invalid family");
    puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
    codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS,
          codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 6: testGetInstance
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testGetInstance() throws Exception {
  MetricsRegionServerSourceFactory metricsRegionServerSourceFactory =
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
  MetricsRegionServerSource serverSource =
      metricsRegionServerSourceFactory.createServer(null);
  assertTrue(serverSource instanceof MetricsRegionServerSourceImpl);
  assertSame(metricsRegionServerSourceFactory,
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class));
}
Example 7: testGetInstance
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testGetInstance() throws Exception {
  MetricsMasterSourceFactory metricsMasterSourceFactory =
      CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class);
  MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null);
  assertTrue(masterSource instanceof MetricsMasterSourceImpl);
  assertSame(metricsMasterSourceFactory,
      CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class));
}
Example 8: createTestTaskAttemptContext
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
private TaskAttemptContext createTestTaskAttemptContext(final Job job)
    throws Exception {
  HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class);
  TaskAttemptContext context = hadoop.createTestTaskAttemptContext(
      job, "attempt_201402131733_0001_m_000000_0");
  return context;
}
Example 9: testGetInstance
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testGetInstance() throws Exception {
  MetricsMasterProcSourceFactory metricsMasterProcSourceFactory =
      CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class);
  MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null);
  assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl);
  assertSame(metricsMasterProcSourceFactory,
      CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class));
}
Example 10: MetricsSource
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
/**
 * Constructor used to register the metrics
 *
 * @param id Name of the source this class is monitoring
 */
public MetricsSource(String id) {
  this.id = id;
  sizeOfLogQueKey = "source." + id + ".sizeOfLogQueue";
  ageOfLastShippedOpKey = "source." + id + ".ageOfLastShippedOp";
  logEditsReadKey = "source." + id + ".logEditsRead";
  logEditsFilteredKey = "source." + id + ".logEditsFiltered";
  shippedBatchesKey = "source." + this.id + ".shippedBatches";
  shippedOpsKey = "source." + this.id + ".shippedOps";
  shippedKBsKey = "source." + this.id + ".shippedKBs";
  logReadInBytesKey = "source." + this.id + ".logReadInBytes";
  rms = CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class);
}
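For context, a hypothetical usage sketch of this constructor (the peer id "peer_cluster_1" is made up for illustration): constructing a MetricsSource registers the per-peer replication metric keys shown above against the shared MetricsReplicationSource singleton.
// Hypothetical: create the metrics source for one replication peer.
MetricsSource metrics = new MetricsSource("peer_cluster_1");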
Example 11: testBatchPut_whileNoRowLocksHeld
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testBatchPut_whileNoRowLocksHeld() throws IOException {
  final Put[] puts = new Put[10];
  MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
  try {
    long syncs = prepareRegionForBachPut(puts, source, false);
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
    LOG.info("Next a batch put with one invalid family");
    puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value);
    codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS,
          codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
Example 12: testBatchPutWithTsSlop
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testBatchPutWithTsSlop() throws Exception {
  byte[] b = Bytes.toBytes(getName());
  byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
  byte[] qual = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("val");
  // add data with a timestamp that is too recent for range. Ensure assert
  conf.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000);
  this.region = initHRegion(b, getName(), conf, cf);
  try {
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    final Put[] puts = new Put[10];
    for (int i = 0; i < 10; i++) {
      puts[i] = new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100);
      puts[i].add(cf, qual, val);
    }
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 13: testCompareTo
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testCompareTo() throws Exception {
  MetricsRegionServerSourceFactory fact =
      CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class);
  MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST"));
  MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST"));
  MetricsRegionSource two = fact.createRegion(new RegionWrapperStub("TWO"));
  assertEquals(0, one.compareTo(oneClone));
  assertTrue(one.compareTo(two) < 0);
  assertTrue(two.compareTo(one) > 0);
}
Example 14: testBatchPutWithTsSlop
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
@Test
public void testBatchPutWithTsSlop() throws Exception {
  byte[] b = Bytes.toBytes(getName());
  byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
  byte[] qual = Bytes.toBytes("qual");
  byte[] val = Bytes.toBytes("val");
  // add data with a timestamp that is too recent for range. Ensure assert
  CONF.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000);
  this.region = initHRegion(b, getName(), CONF, cf);
  try {
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    final Put[] puts = new Put[10];
    for (int i = 0; i < 10; i++) {
      puts[i] = new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100);
      puts[i].add(cf, qual, val);
    }
    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 15: MetricsSnapshot
import org.apache.hadoop.hbase.CompatibilitySingletonFactory; // import the package/class the method depends on
public MetricsSnapshot() {
  source = CompatibilitySingletonFactory.getInstance(MetricsSnapshotSource.class);
}