

Java SplitLogTask.isDone Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.SplitLogTask.isDone. If you are wondering what SplitLogTask.isDone does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.SplitLogTask.


The following presents 7 code examples of the SplitLogTask.isDone method, sorted by popularity by default.
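
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: deserialize the znode payload with SplitLogTask.parseFrom(byte[]) and then query the task state with isDone() (finished by any server) or isDone(ServerName) (finished by a specific server). The class SplitLogTaskStateCheck and its two helper methods are illustrative names invented for this sketch, not HBase API; the DeserializationException import assumes the usual org.apache.hadoop.hbase.exceptions package.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

// Illustrative helper class; the name and methods are not part of HBase.
public class SplitLogTaskStateCheck {

  /**
   * Returns true if the task encoded in the given znode payload is in the
   * DONE state, i.e. the log-splitting work it describes has finished.
   */
  static boolean taskFinished(byte[] data) {
    if (data == null) {
      return false; // no payload yet, so the task cannot be done
    }
    try {
      SplitLogTask slt = SplitLogTask.parseFrom(data);
      return slt.isDone(); // DONE, regardless of which server finished it
    } catch (DeserializationException e) {
      return false; // unreadable payload; treat as not done and let the caller decide
    }
  }

  /**
   * Returns true if the task is DONE and the server recorded in the task
   * matches the given server name (the ServerName-taking overload used in
   * the worker-side examples below).
   */
  static boolean finishedBy(byte[] data, ServerName serverName)
      throws DeserializationException {
    SplitLogTask slt = SplitLogTask.parseFrom(data);
    return slt.isDone(serverName);
  }
}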

Example 1: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
void getDataSetWatchSuccess(String path, byte[] data) {
  SplitLogTask slt;
  try {
    slt = SplitLogTask.parseFrom(data);
  } catch (DeserializationException e) {
    LOG.warn("Failed parse", e);
    return;
  }
  synchronized (grabTaskLock) {
    if (workerInGrabTask) {
      // currentTask can change but that's ok
      String taskpath = currentTask;
      if (taskpath != null && taskpath.equals(path)) {
        ServerName serverName = manager.getServer().getServerName();
        // have to compare data. cannot compare version because then there
        // will be race with attemptToOwnTask()
        // cannot just check whether the node has been transitioned to
        // UNASSIGNED because by the time this worker sets the data watch
        // the node might have made two transitions - from owned by this
        // worker to unassigned to owned by another worker
        if (!slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName)
            && !slt.isResigned(serverName)) {
          LOG.info("task " + taskpath + " preempted from " + serverName
              + ", current task state and owner=" + slt.toString());
          worker.stopTask();
        }
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source file: ZkSplitLogWorkerCoordination.java

Example 2: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
void getDataSetWatchSuccess(String path, byte[] data) {
  SplitLogTask slt;
  try {
    slt = SplitLogTask.parseFrom(data);
  } catch (DeserializationException e) {
    LOG.warn("Failed parse", e);
    return;
  }
  synchronized (grabTaskLock) {
    if (workerInGrabTask) {
      // currentTask can change but that's ok
      String taskpath = currentTask;
      if (taskpath != null && taskpath.equals(path)) {
        // have to compare data. cannot compare version because then there
        // will be race with attemptToOwnTask()
        // cannot just check whether the node has been transitioned to
        // UNASSIGNED because by the time this worker sets the data watch
        // the node might have made two transitions - from owned by this
        // worker to unassigned to owned by another worker
        if (!slt.isOwned(this.serverName) && !slt.isDone(this.serverName)
            && !slt.isErr(this.serverName) && !slt.isResigned(this.serverName)) {
          LOG.info("task " + taskpath + " preempted from " +
              serverName + ", current task state and owner=" + slt.toString());
          stopTask();
        }
      }
    }
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 32, Source file: SplitLogWorker.java

Example 3: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
void getDataSetWatchSuccess(String path, byte[] data) {
  SplitLogTask slt;
  try {
    slt = SplitLogTask.parseFrom(data);
  } catch (DeserializationException e) {
    LOG.warn("Failed parse", e);
    return;
  }
  synchronized (grabTaskLock) {
    if (workerInGrabTask) {
      // currentTask can change but that's ok
      String taskpath = currentTask;
      if (taskpath != null && taskpath.equals(path)) {
        // have to compare data. cannot compare version because then there
        // will be race with attemptToOwnTask()
        // cannot just check whether the node has been transitioned to
        // UNASSIGNED because by the time this worker sets the data watch
        // the node might have made two transitions - from owned by this
        // worker to unassigned to owned by another worker
        if (!slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName)
            && !slt.isResigned(serverName)) {
          LOG.info("task " + taskpath + " preempted from " + serverName
              + ", current task state and owner=" + slt.toString());
          worker.stopTask();
        }
      }
    }
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 30, Source file: ZkSplitLogWorkerCoordination.java

Example 4: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
private void getDataSetWatchSuccess(String path, byte[] data, int version)
    throws DeserializationException {
  if (data == null) {
    if (version == Integer.MIN_VALUE) {
      // assume all done. The task znode suddenly disappeared.
      setDone(path, SUCCESS);
      return;
    }
    SplitLogCounters.tot_mgr_null_data.incrementAndGet();
    LOG.fatal("logic error - got null data " + path);
    setDone(path, FAILURE);
    return;
  }
  data = this.watcher.getRecoverableZooKeeper().removeMetaData(data);
  SplitLogTask slt = SplitLogTask.parseFrom(data);
  if (slt.isUnassigned()) {
    LOG.debug("task not yet acquired " + path + " ver = " + version);
    handleUnassignedTask(path);
  } else if (slt.isOwned()) {
    heartbeat(path, version, slt.getServerName());
  } else if (slt.isResigned()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    resubmitOrFail(path, FORCE);
  } else if (slt.isDone()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
      if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) {
        setDone(path, SUCCESS);
      } else {
        resubmitOrFail(path, CHECK);
      }
    } else {
      setDone(path, SUCCESS);
    }
  } else if (slt.isErr()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    resubmitOrFail(path, CHECK);
  } else {
    LOG.fatal("logic error - unexpected zk state for path = " + path + " data = "
        + slt.toString());
    setDone(path, FAILURE);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 44, Source file: ZKSplitLogManagerCoordination.java

Example 5: removeStaleRecoveringRegionsFromZK

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
/**
 * It removes stale recovering regions under /hbase/recovering-regions/[encoded region name]
 * during master initialization phase.
 * @param failedServers A set of known failed servers
 * @throws KeeperException
 */
void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
    throws KeeperException {

  if (!this.distributedLogReplay) {
    // remove any regions in recovery from ZK which could happen when we turn the feature on
    // and later turn it off
    ZKUtil.deleteChildrenRecursively(watcher, watcher.recoveringRegionsZNode);
    // the function is only used in distributedLogReplay mode when master is in initialization
    return;
  }

  Set<String> knownFailedServers = new HashSet<String>();
  if (failedServers != null) {
    for (ServerName tmpServerName : failedServers) {
      knownFailedServers.add(tmpServerName.getServerName());
    }
  }

  this.recoveringRegionLock.lock();
  try {
    List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode);
    if (tasks != null) {
      for (String t : tasks) {
        byte[] data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.splitLogZNode, t));
        if (data != null) {
          SplitLogTask slt = null;
          try {
            slt = SplitLogTask.parseFrom(data);
          } catch (DeserializationException e) {
            LOG.warn("Failed parse data for znode " + t, e);
          }
          if (slt != null && slt.isDone()) {
            continue;
          }
        }
        // decode the file name
        t = ZKSplitLog.getFileName(t);
        ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(new Path(t));
        if (serverName != null) {
          knownFailedServers.add(serverName.getServerName());
        } else {
          LOG.warn("Found invalid WAL log file name:" + t);
        }
      }
    }

    // remove recovering regions that don't have any RS associated with them
    List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.recoveringRegionsZNode);
    if (regions != null) {
      for (String region : regions) {
        String nodePath = ZKUtil.joinZNode(watcher.recoveringRegionsZNode, region);
        List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath);
        if (regionFailedServers == null || regionFailedServers.isEmpty()) {
          ZKUtil.deleteNode(watcher, nodePath);
          continue;
        }
        boolean needMoreRecovery = false;
        for (String tmpFailedServer : regionFailedServers) {
          if (knownFailedServers.contains(tmpFailedServer)) {
            needMoreRecovery = true;
            break;
          }
        }
        if (!needMoreRecovery) {
          ZKUtil.deleteNodeRecursively(watcher, nodePath);
        }
      }
    }
  } finally {
    this.recoveringRegionLock.unlock();
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 79, Source file: SplitLogManager.java

Example 6: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
private void getDataSetWatchSuccess(String path, byte[] data, int version)
throws DeserializationException {
  if (data == null) {
    if (version == Integer.MIN_VALUE) {
      // assume all done. The task znode suddenly disappeared.
      setDone(path, SUCCESS);
      return;
    }
    SplitLogCounters.tot_mgr_null_data.incrementAndGet();
    LOG.fatal("logic error - got null data " + path);
    setDone(path, FAILURE);
    return;
  }
  data = this.watcher.getRecoverableZooKeeper().removeMetaData(data);
  SplitLogTask slt = SplitLogTask.parseFrom(data);
  if (slt.isUnassigned()) {
    LOG.debug("task not yet acquired " + path + " ver = " + version);
    handleUnassignedTask(path);
  } else if (slt.isOwned()) {
    heartbeat(path, version, slt.getServerName());
  } else if (slt.isResigned()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    resubmitOrFail(path, FORCE);
  } else if (slt.isDone()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
      if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) {
        setDone(path, SUCCESS);
      } else {
        resubmitOrFail(path, CHECK);
      }
    } else {
      setDone(path, SUCCESS);
    }
  } else if (slt.isErr()) {
    LOG.info("task " + path + " entered state: " + slt.toString());
    resubmitOrFail(path, CHECK);
  } else {
    LOG.fatal("logic error - unexpected zk state for path = " + path + " data = " + slt.toString());
    setDone(path, FAILURE);
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 43, Source file: SplitLogManager.java

Example 7: getDataSetWatchSuccess

import org.apache.hadoop.hbase.SplitLogTask; // import the package/class the method depends on
private void getDataSetWatchSuccess(String path, byte[] data, int version)
    throws DeserializationException {
  if (data == null) {
    if (version == Integer.MIN_VALUE) {
      // assume all done. The task znode suddenly disappeared.
      setDone(path, SUCCESS);
      return;
    }
    SplitLogCounters.tot_mgr_null_data.increment();
    LOG.error(HBaseMarkers.FATAL, "logic error - got null data " + path);
    setDone(path, FAILURE);
    return;
  }
  data = ZKMetadata.removeMetaData(data);
  SplitLogTask slt = SplitLogTask.parseFrom(data);
  if (slt.isUnassigned()) {
    LOG.debug("Task not yet acquired " + path + ", ver=" + version);
    handleUnassignedTask(path);
  } else if (slt.isOwned()) {
    heartbeat(path, version, slt.getServerName());
  } else if (slt.isResigned()) {
    LOG.info("Task " + path + " entered state=" + slt.toString());
    resubmitOrFail(path, FORCE);
  } else if (slt.isDone()) {
    LOG.info("Task " + path + " entered state=" + slt.toString());
    if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
      if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) {
        setDone(path, SUCCESS);
      } else {
        resubmitOrFail(path, CHECK);
      }
    } else {
      setDone(path, SUCCESS);
    }
  } else if (slt.isErr()) {
    LOG.info("Task " + path + " entered state=" + slt.toString());
    resubmitOrFail(path, CHECK);
  } else {
    LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = "
        + path + " data = " + slt.toString());
    setDone(path, FAILURE);
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 44, Source file: ZKSplitLogManagerCoordination.java


Note: The org.apache.hadoop.hbase.SplitLogTask.isDone method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.