Java EnvironmentEdgeManagerTestHelper Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper. If you are wondering what the EnvironmentEdgeManagerTestHelper class does, how to use it, or where to find examples of its use, the curated code samples below should help.


The EnvironmentEdgeManagerTestHelper class belongs to the org.apache.hadoop.hbase.util package. The sections below show 14 code examples of the class, sorted by popularity by default.
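
A minimal sketch of the typical pattern, assuming a JUnit 4 test class (the class name below is hypothetical): inject a test EnvironmentEdge before each test and reset the global edge afterwards, exactly as the setUp/tearDown pairs in the examples below do.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.junit.After;
import org.junit.Before;

public class MyTimeSensitiveTest {

  @Before
  public void setUp() {
    // Replace the global clock with an edge that advances on every read, so
    // successive mutations never share a timestamp.
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
  }

  @After
  public void tearDown() {
    // Restore the default wall-clock edge so later tests are unaffected.
    EnvironmentEdgeManagerTestHelper.reset();
  }
}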

Example 1: tearDown

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@After
public void tearDown() throws Exception {
  if (reader != null) {
    reader.close();
  }

  if (primaryRegion != null) {
    HRegion.closeHRegion(primaryRegion);
  }
  if (secondaryRegion != null) {
    HRegion.closeHRegion(secondaryRegion);
  }

  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: TestHRegionReplayEvents.java

Example 2: testIncrementColumnValue_heapSize

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long byAmount = 1L;
    long size;

    for (int i = 0; i < 1000; i++) {
      region.incrementColumnValue(row, fam1, qual1, byAmount, true);

      size = region.memstoreSize.get();
      assertTrue("memstore size: " + size, size >= 0);
    }
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 20, Source: TestHRegion.java

Example 3: setUpBeforeClass

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.TEST_FORCE_REFRESH = true;

  tables = new Table[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }

  envEdge = new ManualEnvironmentEdge();
  envEdge.setValue(EnvironmentEdgeManager.currentTime());
  EnvironmentEdgeManagerTestHelper.injectEdge(envEdge);
}
 
Developer ID: apache, Project: hbase, Lines of code: 23, Source: TestQuotaThrottle.java
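
Once the ManualEnvironmentEdge above is injected, a quota test can move time forward explicitly instead of sleeping until cached throttle state expires. A minimal sketch, assuming the envEdge field from the example above and ManualEnvironmentEdge's incValue(long) mutator (available alongside setValue in recent HBase versions); the helper method name here is hypothetical:

// Hypothetical helper: advance the injected manual clock past the one-minute
// throttle window so that previously consumed quota is treated as expired.
private static void advancePastMinuteWindow() {
  envEdge.incValue(70 * 1000); // 70 seconds, expressed in milliseconds
}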

Example 4: tearDown

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@After
public void tearDown() throws Exception {
  if (reader != null) {
    reader.close();
  }

  if (primaryRegion != null) {
    HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
  }
  if (secondaryRegion != null) {
    HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
  }

  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
 
Developer ID: apache, Project: hbase, Lines of code: 18, Source: TestHRegionReplayEvents.java

Example 5: setUpBeforeClass

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source: TestThriftServerCmdLine.java

Example 6: setUpBeforeClass

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true);
  TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: TestThriftHttpServer.java

Example 7: cleanupFS

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@After
public void cleanupFS() throws Exception {
  if (fs.exists(root)) {
    if (!fs.delete(root, true)) {
      throw new IOException("Failed to delete root test dir: " + root);
    }
    if (!fs.mkdirs(root)) {
      throw new IOException("Failed to create root test dir: " + root);
    }
  }
  EnvironmentEdgeManagerTestHelper.reset();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestSnapshotDescriptionUtils.java

Example 8: setUp

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: TestKeepDeletes.java
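
The effect of the injected IncrementingEnvironmentEdge is easiest to see in isolation. A minimal sketch, assuming the EnvironmentEdgeManager.currentTime() accessor used by recent HBase versions (the 0.94-era code elsewhere in this article calls currentTimeMillis() instead) and JUnit's assertTrue; the method name is hypothetical:

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

public void demonstrateMonotonicClock() {
  // Every read of the injected clock returns a strictly larger value, so two
  // Puts issued back-to-back can never collide on the same timestamp.
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
  try {
    long first = EnvironmentEdgeManager.currentTime();
    long second = EnvironmentEdgeManager.currentTime();
    assertTrue("injected clock should advance on every read", second > first);
  } finally {
    EnvironmentEdgeManagerTestHelper.reset(); // restore the wall-clock edge
  }
}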

Example 9: testIncrementColumnValue_BumpSnapshot

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    long value = 42L;
    long incr = 44L;

    // first put something in kvset, then snapshot it.
    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    // get the store in question:
    Store s = region.getStore(fam1);
    s.snapshot(); //bam

    // now increment:
    long newVal = region.incrementColumnValue(row, fam1, qual1,
        incr, false);

    assertEquals(value + incr, newVal);

    // get both versions:
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);

    Result r = region.get(get, null);
    assertEquals(2, r.size());
    KeyValue first = r.raw()[0];
    KeyValue second = r.raw()[1];

    assertTrue("ICV failed to upgrade timestamp",
        first.getTimestamp() != second.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 41, Source: TestHRegion.java

Example 10: setUpBeforeClass

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster();
  //ensure that server time increments every time we do an operation, otherwise
  //successive puts having the same timestamp will override each other
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 8, Source: TestThriftServerCmdLine.java

Example 11: setUp

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@Before
public void setUp() throws Exception {
  /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
   * implicit RS timing.
   * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
   * compact timestamps are tracked. Otherwise, forced major compaction will not purge
   * Delete's having the same timestamp. see ScanQueryMatcher.match():
   * if (retainDeletesInOutput
   *     || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
   *     <= timeToPurgeDeletes) ... )
   *
   */
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 15, Source: TestKeepDeletes.java

Example 12: testBypassAlsoCompletes

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
/**
 * Test that when bypass is called, we skip calling the remaining coprocessors stacked up on the
 * method, in this case a prePut.
 * If the column family is 'test', then bypass is invoked.
 */
@Test
public void testBypassAlsoCompletes() throws IOException {
  //ensure that server time increments every time we do an operation, otherwise
  //previous deletes will eclipse successive puts having the same timestamp
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

  Table t = util.getConnection().getTable(tableName);
  List<Put> puts = new ArrayList<>();
  Put p = new Put(row1);
  p.addColumn(dummy, dummy, dummy);
  puts.add(p);
  p = new Put(row2);
  p.addColumn(test, dummy, dummy);
  puts.add(p);
  p = new Put(row3);
  p.addColumn(test, dummy, dummy);
  puts.add(p);
  t.put(puts);
  // Ensure expected result.
  checkRowAndDelete(t, row1, 1);
  checkRowAndDelete(t, row2, 0);
  checkRowAndDelete(t, row3, 0);
  // We have three Coprocessors stacked up on the prePut. See the beforeClass setup. We did three
  // puts above, two of which bypassed. A bypass means do not call the other coprocessors in the
  // stack, so for the two 'test' puts above, we should not have called through to all
  // three coprocessors in the chain. So we should have:
  // 3 invocations for first put + 1 invocation + 1 bypass for second put + 1 invocation +
  // 1 bypass for the last put. Assert.
  assertEquals("Total CP invocation count", 5, TestCoprocessor.PREPUT_INVOCATIONS.get());
  assertEquals("Total CP bypasses", 2, TestCoprocessor.PREPUT_BYPASSES.get());
}
 
Developer ID: apache, Project: hbase, Lines of code: 37, Source: TestRegionObserverBypass.java

Example 13: tearDown

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  if (store != null) {
    try {
      store.close();
    } catch (IOException e) {
    }
    store = null;
  }
  if (region != null) {
    region.close();
    region = null;
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 16, Source: TestHStore.java

Example 14: tearDown

import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; // import the required package/class
@After
public void tearDown() throws Exception {
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + test_util.getDataTestDir());
  test_util.cleanupTestDir();
  CacheConfig.clearGlobalInstances();
}
 
Developer ID: apache, Project: hbase, Lines of code: 8, Source: TestScannerFromBucketCache.java


Note: The org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Refer to the License of the corresponding project before redistributing or using the code; do not reproduce without permission.