This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.DefaultStoreEngine. If you are unsure what DefaultStoreEngine does or how to use it, the curated examples below should help.
DefaultStoreEngine belongs to the org.apache.hadoop.hbase.regionserver package. 15 code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
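Before the examples, here is a minimal sketch (illustrative only, not taken from the examples below) of the configuration hooks DefaultStoreEngine exposes: the engine implementation is selected through StoreEngine.STORE_ENGINE_CLASS_KEY, and the default engine's compaction policy and store flusher can be swapped through the DEFAULT_COMPACTION_POLICY_CLASS_KEY and DEFAULT_STORE_FLUSHER_CLASS_KEY constants that recur throughout the examples. The FIFO policy chosen here is just one possible plug-in.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.StoreEngine;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;

// A minimal sketch: plug a non-default compaction policy into DefaultStoreEngine.
// The class choice is illustrative; any policy compatible with the default engine works.
static Configuration configureDefaultStoreEngine(Configuration conf) {
  // Select the store engine implementation (DefaultStoreEngine is HBase's default).
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  // Swap in a different compaction policy for the default engine.
  conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  return conf;
}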
Example 1: testCompactionWithoutThroughputLimit
Runs a major compaction with throughput limiting disabled and returns how long it takes to merge ten store files into one.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 2: prepareData
Creates a table that uses FIFOCompactionPolicy with a 1-second TTL and loads it with ten flushed store files of random data.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private HStore prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
    .build();
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  TimeOffsetEnvironmentEdge edge =
    (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      ThreadLocalRandom.current().nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
Example 3: testSanityCheckBlockingStoreFiles
Asserts that creating a FIFO-compacted table fails when hbase.hstore.blockingStoreFiles is below the recommended minimum of 1000.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckBlockingStoreFiles() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Blocking file count 'hbase.hstore.blockingStoreFiles'");
  error.expectMessage("is below recommended minimum of 1000 for column family");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-BlockingStoreFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    .setValue(HStore.BLOCKING_STOREFILES_KEY, "10")
    .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
    .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
Example 4: testCompactionWithoutThroughputLimit
The same measurement as Example 1, written against a newer HBase API (HStore, Admin, NoLimitThroughputController).
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 5: prepareData
An older-API variant of Example 2 that builds the table with HTableDescriptor and HColumnDescriptor.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  TimeOffsetEnvironmentEdge edge =
    (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
Example 6: testSanityCheckTTL
Expects table creation to fail because FIFO compaction requires a non-default TTL on the column family.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckTTL() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString() + "-TTL";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: FIFO compaction rejects a column family without a TTL
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 7: testSanityCheckMinVersion
Expects table creation to fail because FIFO compaction does not support MIN_VERSIONS > 0.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckMinVersion() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString() + "-MinVersion";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  colDesc.setMinVersions(1);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: MIN_VERSIONS > 0 is not supported with FIFO compaction
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 8: testSanityCheckBlockingStoreFiles
Expects table creation to fail because the configured blocking store file count (10) is too low for FIFO compaction.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckBlockingStoreFiles() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10);
  TEST_UTIL.startMiniCluster(1);
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString() + "-BlockingStoreFiles";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: blocking store file count of 10 is below the minimum for FIFO compaction
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 9: testCompactionWithThroughputLimit
Runs a major compaction under a 1 MB/s throughput limit and asserts the observed throughput stays within 20% of that limit.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // confirm that the speed limit works properly (not too fast, and also not too slow);
    // 20% is the maximum acceptable error rate
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return duration;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 10: testSanityCheckTTL
A newer-API variant of the TTL sanity check: table creation must fail with a DoNotRetryIOException because no TTL is set.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckTTL() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("Default TTL is not supported");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-TTL");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
  TEST_UTIL.getAdmin().createTable(desc);
}
Example 11: testSanityCheckMinVersion
A newer-API variant of the MIN_VERSIONS sanity check, again asserting a DoNotRetryIOException.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@Test
public void testSanityCheckMinVersion() throws IOException {
  error.expect(DoNotRetryIOException.class);
  error.expectMessage("MIN_VERSION > 0 is not supported for FIFO compaction");
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
      .setMinVersions(1).build())
    .build();
  TEST_UTIL.getAdmin().createTable(desc);
}
Example 12: testCompactionWithThroughputLimit
A newer-API variant of Example 9 (HStore, Admin).
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // confirm that the speed limit works properly (not too fast, and also not too slow);
    // 20% is the maximum acceptable error rate
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return duration;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 13: setUpBeforeClass
MOB test setup: registers DefaultMobStoreFlusher through DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, along with a MOB region implementation and a master observer, before starting the mini cluster.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
  TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  TEST_UTIL.getConfiguration().setClass("hbase.hregion.impl", HMobRegion.class,
    HRegion.class);
  TEST_UTIL.getConfiguration().setClass(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
    DefaultMobStoreFlusher.class, DefaultStoreFlusher.class);
  TEST_UTIL.getConfiguration().setClass("hbase.coprocessor.master.classes",
    MobMasterObserver.class, BaseMasterObserver.class);
  TEST_UTIL.startMiniCluster(1);
}
Example 14: setUpBeforeClass
A similar MOB setup that also starts a mini MapReduce cluster.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
  TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  TEST_UTIL.getConfiguration().setClass("hbase.hregion.impl", HMobRegion.class,
    HRegion.class);
  TEST_UTIL.getConfiguration().setClass(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
    DefaultMobStoreFlusher.class, DefaultStoreFlusher.class);
  TEST_UTIL.startMiniCluster();
  TEST_UTIL.startMiniMapReduceCluster();
}
Example 15: setUpBeforeClass
The same MOB setup as Example 13, without the master observer coprocessor.
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
  TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  TEST_UTIL.getConfiguration().setClass("hbase.hregion.impl", HMobRegion.class,
    HRegion.class);
  TEST_UTIL.getConfiguration().setClass(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
    DefaultMobStoreFlusher.class, DefaultStoreFlusher.class);
  TEST_UTIL.startMiniCluster(1);
}