

Java SplitLogCounters Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.hbase.SplitLogCounters. If you are asking how SplitLogCounters is used in practice, or looking for examples of it in real code, the selected class code examples below may help.


The SplitLogCounters class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, ordered by popularity by default.
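For context before the examples: in the HBase versions most of these snippets come from, SplitLogCounters is essentially a holder of public static counters (fields such as tot_mgr_heartbeat or tot_wkr_task_acquired) plus a resetCounters() method that zeroes them all; production code increments a counter when the corresponding event happens, and tests reset and then read the counters to verify behavior. The following is only a minimal usage sketch built on that assumption, not code taken from HBase itself:

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.SplitLogCounters;

// Minimal usage sketch, assuming (as in the HBase 1.x sources excerpted below) that the
// counters are public static AtomicLong fields and that resetCounters() zeroes them all.
public class SplitLogCountersSketch {
  public static void main(String[] args) {
    SplitLogCounters.resetCounters();                      // typical test setup: start from zero

    SplitLogCounters.tot_mgr_heartbeat.incrementAndGet();  // production code bumps a counter

    AtomicLong heartbeats = SplitLogCounters.tot_mgr_heartbeat;
    System.out.println("tot_mgr_heartbeat = " + heartbeats.get()); // tests read it back
  }
}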

Example 1: getDataSetWatchFailure

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
void getDataSetWatchFailure(String path) {
  synchronized (grabTaskLock) {
    if (workerInGrabTask) {
      // currentTask can change but that's ok
      String taskpath = currentTask;
      if (taskpath != null && taskpath.equals(path)) {
        LOG.info("retrying data watch on " + path);
        SplitLogCounters.tot_wkr_get_data_retry.incrementAndGet();
        getDataSetWatchAsync();
      } else {
        // no point setting a watch on the task which this worker is not
        // working upon anymore
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: ZkSplitLogWorkerCoordination.java

Example 2: deleteNodeSuccess

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void deleteNodeSuccess(String path) {
  if (ignoreZKDeleteForTesting) {
    return;
  }
  Task task;
  task = details.getTasks().remove(path);
  if (task == null) {
    if (ZKSplitLog.isRescanNode(watcher, path)) {
      SplitLogCounters.tot_mgr_rescan_deleted.incrementAndGet();
    }
    SplitLogCounters.tot_mgr_missing_state_in_delete.incrementAndGet();
    LOG.debug("deleted task without in memory state " + path);
    return;
  }
  synchronized (task) {
    task.status = DELETED;
    task.notify();
  }
  SplitLogCounters.tot_mgr_task_deleted.incrementAndGet();
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: ZKSplitLogManagerCoordination.java

Example 3: heartbeat

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void heartbeat(String path, int new_version, ServerName workerName) {
  Task task = findOrCreateOrphanTask(path);
  if (new_version != task.last_version) {
    if (task.isUnassigned()) {
      LOG.info("task " + path + " acquired by " + workerName);
    }
    task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName);
    SplitLogCounters.tot_mgr_heartbeat.incrementAndGet();
  } else {
    // duplicate heartbeats - heartbeats w/o zk node version
    // changing - are possible. The timeout thread does
    // getDataSetWatch() just to check whether a node still
    // exists or not
  }
  return;
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: ZKSplitLogManagerCoordination.java

Example 4: testGetPreviousRecoveryMode

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Ignore("DLR is broken by HBASE-12751") @Test(timeout=60000)
public void testGetPreviousRecoveryMode() throws Exception {
  LOG.info("testGetPreviousRecoveryMode");
  SplitLogCounters.resetCounters();
  // Not actually enabling DLR for the cluster, just for the ZkCoordinatedStateManager to use.
  // The test is just manipulating ZK manually anyways.
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);

  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"),
    new SplitLogTask.Unassigned(
      ServerName.valueOf("mgr,1,1"), RecoveryMode.LOG_SPLITTING).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

  slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
  LOG.info("Mode1=" + slm.getRecoveryMode());
  assertTrue(slm.isLogSplitting());
  zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1);
  LOG.info("Mode2=" + slm.getRecoveryMode());
  slm.setRecoveryMode(false);
  LOG.info("Mode3=" + slm.getRecoveryMode());
  assertTrue("Mode4=" + slm.getRecoveryMode(), slm.isLogReplaying());
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestSplitLogManager.java

Example 5: setup

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.baseZNode), not (is(-1)));
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.splitLogZNode), not (is(-1)));

  LOG.debug(zkw.splitLogZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.rsZNode), not (is(-1)));

  SplitLogCounters.resetCounters();
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
  this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
      RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestSplitLogWorker.java

Example 6: testAcquireTaskAtStartup

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Test(timeout=60000)
public void testAcquireTaskAtStartup() throws Exception {
  LOG.info("testAcquireTaskAtStartup");
  SplitLogCounters.resetCounters();
  final String TATAS = "tatas";
  final ServerName RS = ServerName.valueOf("rs,1,1");
  RegionServerServices mockedRS = getRegionServer(RS);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS),
    new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(),
      Ids.OPEN_ACL_UNSAFE,
      CreateMode.PERSISTENT);

  SplitLogWorker slw =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS));
    SplitLogTask slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(RS));
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestSplitLogWorker.java
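Note that Examples 6 and 13 call a waitForCounter(...) helper; that helper is defined in the HBase test classes themselves (e.g. TestSplitLogWorker), not in SplitLogCounters. The sketch below is only a hypothetical illustration of the polling pattern such a helper implements (wait until an AtomicLong counter moves from an old value to an expected new value, or fail after a timeout); it is not the actual HBase implementation:

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical polling helper in the spirit of the waitForCounter calls above; the real
// helper lives in the HBase test classes and may differ in signature and behavior.
final class CounterTestUtil {
  private CounterTestUtil() {}

  static void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timems;
    while (System.currentTimeMillis() < deadline) {
      long cur = ctr.get();
      if (cur == newval) {
        return;                                   // counter reached the expected value
      }
      if (cur != oldval) {
        throw new AssertionError("counter moved to unexpected value " + cur);
      }
      Thread.sleep(10);                           // poll again shortly
    }
    throw new AssertionError("timed out waiting for counter to reach " + newval);
  }
}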

Example 7: testGetPreviousRecoveryMode

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Test(timeout=60000)
public void testGetPreviousRecoveryMode() throws Exception {
  LOG.info("testGetPreviousRecoveryMode");
  SplitLogCounters.resetCounters();
  Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  testConf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  ds = new DummyServer(zkw, testConf);

  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"),
    new SplitLogTask.Unassigned(
      ServerName.valueOf("mgr,1,1"), RecoveryMode.LOG_SPLITTING).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

  slm = new SplitLogManager(ds, testConf, stopper, master, DUMMY_MASTER);
  LOG.info("Mode1=" + slm.getRecoveryMode());
  assertTrue(slm.isLogSplitting());
  zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1);
  LOG.info("Mode2=" + slm.getRecoveryMode());
  slm.setRecoveryMode(false);
  LOG.info("Mode3=" + slm.getRecoveryMode());
  assertTrue("Mode4=" + slm.getRecoveryMode(), slm.isLogReplaying());
}
 
Developer: grokcoder, Project: pbase, Lines: 23, Source: TestSplitLogManager.java

Example 8: startCluster

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void startCluster(int num_rs) throws Exception {
  SplitLogCounters.resetCounters();
  LOG.info("Starting cluster");
  conf.getLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing
  conf.setInt("hbase.regionserver.wal.max.splitters", 3);
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.setDFSCluster(dfsCluster);
  TEST_UTIL.setZkCluster(zkCluster);
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
  cluster = TEST_UTIL.getHBaseCluster();
  LOG.info("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  master = cluster.getMaster();
  while (cluster.getLiveRegionServerThreads().size() < num_rs) {
    Threads.sleep(1);
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 23, Source: TestDistributedLogSplitting.java

Example 9: heartbeat

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void heartbeat(String path, int new_version, ServerName workerName) {
  Task task = findOrCreateOrphanTask(path);
  if (new_version != task.last_version) {
    if (task.isUnassigned()) {
      LOG.info("task " + path + " acquired by " + workerName);
    }
    task.heartbeat(EnvironmentEdgeManager.currentTimeMillis(), new_version, workerName);
    SplitLogCounters.tot_mgr_heartbeat.incrementAndGet();
  } else {
    // duplicate heartbeats - heartbeats w/o zk node version
    // changing - are possible. The timeout thread does
    // getDataSetWatch() just to check whether a node still
    // exists or not
  }
  return;
}
 
Developer: tenggyut, Project: HIndex, Lines: 17, Source: SplitLogManager.java

Example 10: deleteNodeSuccess

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void deleteNodeSuccess(String path) {
  if (ignoreZKDeleteForTesting) {
    return;
  }
  Task task;
  task = tasks.remove(path);
  if (task == null) {
    if (ZKSplitLog.isRescanNode(watcher, path)) {
      SplitLogCounters.tot_mgr_rescan_deleted.incrementAndGet();
    }
    SplitLogCounters.tot_mgr_missing_state_in_delete.incrementAndGet();
    LOG.debug("deleted task without in memory state " + path);
    return;
  }
  synchronized (task) {
    task.status = DELETED;
    task.notify();
  }
  SplitLogCounters.tot_mgr_task_deleted.incrementAndGet();
}
 
Developer: tenggyut, Project: HIndex, Lines: 21, Source: SplitLogManager.java

Example 11: startCluster

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void startCluster(int num_rs) throws Exception {
  SplitLogCounters.resetCounters();
  LOG.info("Starting cluster");
  conf.getLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
  conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing
  conf.setInt("hbase.regionserver.wal.max.splitters", 3);
  TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.setDFSCluster(dfsCluster);
  TEST_UTIL.setZkCluster(zkCluster);
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
  cluster = TEST_UTIL.getHBaseCluster();
  LOG.info("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  master = cluster.getMaster();
  while (cluster.getLiveRegionServerThreads().size() < num_rs) {
    Threads.sleep(1);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 22, Source: TestDistributedLogSplitting.java

Example 12: setup

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "split-log-worker-tests", null);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.rsZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.rsZNode) != -1);
  SplitLogCounters.resetCounters();
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
}
 
Developer: tenggyut, Project: HIndex, Lines: 19, Source: TestSplitLogWorker.java

Example 13: testAcquireTaskAtStartup

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
@Test(timeout=60000)
public void testAcquireTaskAtStartup() throws Exception {
  LOG.info("testAcquireTaskAtStartup");
  SplitLogCounters.resetCounters();
  final String TATAS = "tatas";
  final ServerName RS = ServerName.valueOf("rs,1,1");
  RegionServerServices mockedRS = getRegionServer(RS);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS),
    new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(), Ids.OPEN_ACL_UNSAFE,
      CreateMode.PERSISTENT);

  SplitLogWorker slw =
      new SplitLogWorker(zkw, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS));
    SplitLogTask slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(RS));
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 24, Source: TestSplitLogWorker.java

Example 14: getDataSetWatchFailure

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
void getDataSetWatchFailure(String path) {
  synchronized (grabTaskLock) {
    if (workerInGrabTask) {
      // currentTask can change but that's ok
      String taskpath = currentTask;
      if (taskpath != null && taskpath.equals(path)) {
        LOG.info("retrying data watch on " + path);
        SplitLogCounters.tot_wkr_get_data_retry.increment();
        getDataSetWatchAsync();
      } else {
        // no point setting a watch on the task which this worker is not
        // working upon anymore
      }
    }
  }
}
 
Developer: apache, Project: hbase, Lines: 17, Source: ZkSplitLogWorkerCoordination.java

Example 15: deleteNodeSuccess

import org.apache.hadoop.hbase.SplitLogCounters; // import the required package/class
private void deleteNodeSuccess(String path) {
  if (ignoreZKDeleteForTesting) {
    return;
  }
  Task task;
  task = details.getTasks().remove(path);
  if (task == null) {
    if (ZKSplitLog.isRescanNode(watcher, path)) {
      SplitLogCounters.tot_mgr_rescan_deleted.increment();
    }
    SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
    LOG.debug("Deleted task without in memory state " + path);
    return;
  }
  synchronized (task) {
    task.status = DELETED;
    task.notify();
  }
  SplitLogCounters.tot_mgr_task_deleted.increment();
}
 
Developer: apache, Project: hbase, Lines: 21, Source: ZKSplitLogManagerCoordination.java
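One difference worth noting: Examples 14 and 15, taken from the apache/hbase project, call increment() on the counters rather than the incrementAndGet() seen in the earlier examples. That API matches java.util.concurrent.atomic.LongAdder rather than AtomicLong, which appears to be the counter type used in newer HBase releases; treat that as an assumption in the short sketch below, which uses a stand-in LongAdder variable instead of the real SplitLogCounters class:

import java.util.concurrent.atomic.LongAdder;

// Sketch of the counter shape implied by Examples 14 and 15: increment() for writers,
// sum() for readers. The local variable is only a stand-in for a field such as
// SplitLogCounters.tot_mgr_task_deleted in newer HBase versions.
public class LongAdderCounterSketch {
  public static void main(String[] args) {
    LongAdder totMgrTaskDeleted = new LongAdder();

    totMgrTaskDeleted.increment();                 // what the newer examples call
    long value = totMgrTaskDeleted.sum();          // LongAdder is read with sum(), not get()
    System.out.println("tot_mgr_task_deleted = " + value);
  }
}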


Note: The org.apache.hadoop.hbase.SplitLogCounters class examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.