当前位置: 首页>>代码示例>>Java>>正文


Java Daemon.start方法代码示例

本文整理汇总了Java中org.apache.hadoop.util.Daemon.start方法的典型用法代码示例。如果您正苦于以下问题:Java Daemon.start方法的具体用法?Java Daemon.start怎么用?Java Daemon.start使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.util.Daemon的用法示例。


在下文中一共展示了Daemon.start方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: startExpiryDaemon

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts the cache-expiry daemon thread, unless one is already running.
 * The daemon executes {@code PeerCache.this.run()} and always clears the
 * cache when that loop exits. Synchronized so two concurrent callers
 * cannot start two daemons.
 */
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {  // fixed: redundant "== true" comparison
    return;
  }

  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch(InterruptedException e) {
        // Interruption is the normal shutdown signal for this daemon.
      } finally {
        // Always release cached peers, even on unexpected exit.
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:PeerCache.java

示例2: recoverBlocks

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Spawns a daemon thread that recovers the given blocks one at a time.
 * Run by the primary datanode; a failure on an individual block is logged
 * and does not stop recovery of the remaining blocks.
 *
 * @param who    description of the requester, used only for logging
 * @param blocks the blocks to recover
 * @return the recovery daemon, already started
 */
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable recoveryTask = new Runnable() {
    @Override
    public void run() {
      for (final RecoveringBlock block : blocks) {
        try {
          logRecoverBlock(who, block);
          recoverBlock(block);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + block, e);
        }
      }
    }
  };

  final Daemon recoveryDaemon = new Daemon(threadGroup, recoveryTask);
  recoveryDaemon.start();
  return recoveryDaemon;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:DataNode.java

示例3: startExpiryDaemon

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Launches the cache-expiry daemon if it is not running yet. The daemon
 * drives {@code PeerCache.this.run()} and clears the cache on exit.
 * Synchronized to prevent two daemons from being started concurrently.
 */
private synchronized void startExpiryDaemon() {
  if (isDaemonStarted()) {
    return; // a daemon is already running; nothing to do
  }

  final Runnable expiryLoop = new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        // interruption is the expected shutdown path; nothing to do
      } finally {
        // release all cached peers no matter how the loop ended
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  };

  daemon = new Daemon(expiryLoop);
  daemon.start();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:26,代码来源:PeerCache.java

示例4: recoverBlocks

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts a daemon that recovers each of the given blocks, dispatching
 * striped blocks to {@code RecoveryTaskStriped} and contiguous blocks to
 * {@code RecoveryTaskContiguous}. Per-block failures are logged and
 * recovery continues with the next block.
 *
 * @param who    description of the requester, used only for logging
 * @param blocks the blocks to recover
 * @return the recovery daemon, already started
 */
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  final Runnable worker = new Runnable() {
    @Override
    public void run() {
      for (final RecoveringBlock rb : blocks) {
        try {
          logRecoverBlock(who, rb);
          // Striped (erasure-coded) blocks take a different recovery path.
          if (rb.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) rb).recover();
          } else {
            new RecoveryTaskContiguous(rb).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + rb, e);
        }
      }
    }
  };

  final Daemon recovery = new Daemon(datanode.threadGroup, worker);
  recovery.start();
  return recovery;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:23,代码来源:BlockRecoveryWorker.java

示例5: recoverBlocks

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts a daemon thread, run by the primary datanode, that recovers the
 * given blocks one by one. An individual block's failure is logged and
 * recovery proceeds with the remaining blocks.
 *
 * @param who    who requested the recovery, for logging
 * @param blocks the blocks to recover
 * @return the started daemon
 */
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable job = new Runnable() {
    @Override
    public void run() {
      for (final RecoveringBlock rb : blocks) {
        try {
          logRecoverBlock(who, rb);
          recoverBlock(rb);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + rb, e);
        }
      }
    }
  };

  final Daemon worker = new Daemon(threadGroup, job);
  worker.start();
  return worker;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:21,代码来源:DataNode.java

示例6: recoverBlocks

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts a daemon that recovers the given blocks on behalf of the
 * NameNode. Run by the primary datanode.
 *
 * @param namespaceId      namespace the blocks belong to
 * @param blocks           blocks to recover
 * @param targets          per-block datanode targets (parallel to
 *                         {@code blocks})
 * @param processStartTime time the recovery request was issued; used to
 *                         derive the shared recovery deadline
 * @return the started recovery daemon
 */
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks,
    final DatanodeInfo[][] targets, long processStartTime) {
  // All blocks share one deadline computed from the request start time.
  final long deadline = processStartTime + blkRecoveryTimeout;
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override // fixed: annotation was missing
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true, deadline);
        } catch (IOException e) {
          // One failed block must not abort recovery of the rest.
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:20,代码来源:DataNode.java

示例7: triggerFailover

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 *
 * @throws IOException if waiting for the safe-mode monitor is interrupted
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();
  // Snapshot the live datanodes; each one owes us a heartbeat before the
  // failover can be considered complete.
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    // Block until the monitor decides safe mode can be left.
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    // Fixed: restore the interrupt status and preserve the cause instead of
    // swallowing both; message previously named the wrong method and was
    // misspelled ("triggerSafeMode() interruped()").
    Thread.currentThread().interrupt();
    throw new IOException("triggerFailover() interrupted", ie);
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new RuntimeException("safeModeState is : " + safeModeState +
        " which does not indicate a successful exit of safemode");
  }
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:26,代码来源:StandbySafeMode.java

示例8: checkMode

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Check and trigger safe mode if needed.
 * Decides, in order: enter safe mode, leave it immediately, report that the
 * extension monitor is already running, or start the extension monitor.
 */
private void checkMode() {
  if (needEnter()) {
    enter();
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }

  // The threshold is reached. Leave right away when safe mode is already
  // off, or when no extension/threshold wait is configured.
  final boolean noWaitNeeded = !isOn() || extension <= 0 || threshold <= 0;
  if (noWaitNeeded) {
    this.leave(true); // leave safe mode
    return;
  }

  if (reached > 0) {
    // Threshold was already reached earlier; the monitor is running.
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }

  // First time the threshold is reached: record when, start the monitor.
  reached = now();
  smmthread = new Daemon(new SafeModeMonitor());
  smmthread.start();
  reportStatus("STATE* Safe mode extension entered.", true);
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:26,代码来源:FSNamesystem.java

示例9: recoverBlocks

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts a daemon that recovers the given blocks on behalf of the
 * NameNode. Run by the primary datanode.
 *
 * @param blocks  blocks to recover
 * @param targets per-block datanode targets (parallel to {@code blocks})
 * @return the started recovery daemon
 */
public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override // fixed: annotation was missing
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", blocks[i], targets[i]);
          recoverBlock(blocks[i], false, targets[i], true);
        } catch (IOException e) {
          // One failed block must not abort recovery of the rest.
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:18,代码来源:DataNode.java

示例10: main

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Entry point with some simple utility handling: with arguments it runs
 * the requested command and exits with its status; without arguments it
 * runs the secondary namenode as a never-ending checkpoint daemon.
 *
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  final Configuration conf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, conf);
  } catch (IllegalArgumentException e) {
    // Bad generic options: report the problem, print usage, and bail out.
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }

  if (argv.length >= 1) {
    // Command mode: run the requested action and exit with its status.
    final SecondaryNameNode secondary = new SecondaryNameNode(conf);
    System.exit(secondary.processArgs(argv));
  }

  // Daemon mode: create a never-ending checkpoint daemon.
  final Daemon checkpointThread = new Daemon(new SecondaryNameNode(conf));
  checkpointThread.start();
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:26,代码来源:SecondaryNameNode.java

示例11: startThreads

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Should be called before this object is used: installs an initial master
 * key and then launches the expired-token remover daemon.
 *
 * @throws IOException if installing the initial key fails
 */
public void startThreads() throws IOException {
  // Must not be started twice.
  Preconditions.checkState(!running);
  // Install a key before any tokens can be issued; may throw, in which
  // case 'running' is never set and no thread is started.
  updateCurrentKey();
  synchronized (this) {
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:11,代码来源:AbstractDelegationTokenSecretManager.java

示例12: put

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Registers {@code out} as a file being written by client {@code dfsc} and
 * ensures a lease-renewal daemon is running for this renewer. A fresh
 * daemon (with a new id) is started when none is running or the previous
 * one has expired; on exit the daemon removes this renewer from the
 * factory so a later {@code put} creates a new renewer.
 *
 * @param inodeId inode id of the file being written
 * @param out     output stream whose lease must be kept alive
 * @param dfsc    the client writing the file; ignored if no longer running
 */
public synchronized void put(final long inodeId, final DFSOutputStream out,
    final DFSClient dfsc) {
  if (dfsc.isClientRunning()) {
    if (!isRunning() || isRenewerExpired()) {
      // start a new daemon with a new id; the id is captured by the
      // Runnable so log lines from different daemon generations can be
      // told apart.
      final int id = ++currentId;
      daemon = new Daemon(new Runnable() {
        @Override
        public void run() {
          try {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " started");
            }
            // Renewal loop; returns/throws when this daemon generation
            // is done (e.g. superseded by a newer id or interrupted).
            LeaseRenewer.this.run(id);
          } catch(InterruptedException e) {
            LOG.debug("LeaseRenewer is interrupted.", e);
          } finally {
            // Deregister under the renewer's lock so a concurrent put()
            // cannot observe a half-removed renewer.
            synchronized(LeaseRenewer.this) {
              Factory.INSTANCE.remove(LeaseRenewer.this);
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug("Lease renewer daemon for " + clientsString()
                  + " with renew id " + id + " exited");
            }
          }
        }

        @Override
        public String toString() {
          return String.valueOf(LeaseRenewer.this);
        }
      });
      daemon.start();
    }
    // Track the stream so its lease gets renewed, and mark the renewer
    // as non-empty (Long.MAX_VALUE = "never became empty").
    dfsc.putFileBeingWritten(inodeId, out);
    emptyTime = Long.MAX_VALUE;
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:40,代码来源:LeaseRenewer.java

示例13: tryStart

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Starts the async-call processor daemon if it is not already running.
 * Uses a two-phase compare-and-set on {@code running} (presumably an
 * AtomicReference&lt;Thread&gt; — TODO confirm against the field
 * declaration): first the calling thread claims the slot (null -&gt;
 * current), then it is swapped for the daemon thread before the daemon is
 * started. Losing the first CAS means another thread already started (or
 * is starting) the daemon, so this call is a no-op.
 */
void tryStart() {
  final Thread current = Thread.currentThread();
  // Phase 1: claim the start slot; only one caller can win.
  if (running.compareAndSet(null, current)) {
    final Daemon daemon = new Daemon() {
      @Override
      public void run() {
        for (; isRunning(this);) {
          // Process pending calls; checkCalls() returns how long to wait
          // before the next check.
          final long waitTime = checkCalls();
          tryStop(this);

          try {
            synchronized (AsyncCallHandler.this) {
              // Sleep until the timeout or until new work is signalled.
              AsyncCallHandler.this.wait(waitTime);
            }
          } catch (InterruptedException e) {
            kill(this);
          }
        }
      }
    };

    // Phase 2: publish the daemon as the running marker. This must
    // succeed because we hold the slot; a failure indicates a bug.
    final boolean set = running.compareAndSet(current, daemon);
    Preconditions.checkState(set);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Starting AsyncCallQueue.Processor " + daemon);
    }
    daemon.start();
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:30,代码来源:AsyncCallHandler.java

示例14: triggerFailover

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Triggers failover processing for safe mode and blocks until we have left
 * safe mode.
 *
 * @throws IOException if waiting for the safe-mode monitor is interrupted,
 *         or if safe mode was not exited cleanly after failover
 */
protected void triggerFailover() throws IOException {
  clearDataStructures();

  // stop sending PREPARE_FAILOVER command
  // we are performing failover now
  prepareFailover = false;

  // Snapshot the live datanodes; each one owes us a heartbeat before the
  // failover can be considered complete.
  for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
    liveDatanodes.add(node);
    outStandingHeartbeats.add(node);
  }
  InjectionHandler
      .processEvent(InjectionEvent.STANDBY_ENTER_SAFE_MODE);
  safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
  InjectionHandler.processEvent(InjectionEvent.STANDBY_FAILOVER_INPROGRESS);
  safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
  safeModeMonitor.start();
  try {
    // Block until the monitor decides safe mode can be left.
    safeModeMonitor.join();
  } catch (InterruptedException ie) {
    // Fixed: restore the interrupt status and preserve the cause instead of
    // swallowing both; message previously named the wrong method and was
    // misspelled ("triggerSafeMode() interruped()").
    Thread.currentThread().interrupt();
    throw new IOException("triggerFailover() interrupted", ie);
  }
  if (safeModeState != SafeModeState.AFTER_FAILOVER) {
    throw new IOException("safeModeState is : " + safeModeState +
        " which does not indicate a successful exit of safemode");
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:34,代码来源:StandbySafeMode.java

示例15: initialize

import org.apache.hadoop.util.Daemon; //导入方法依赖的package包/类
/**
 * Connects to the Collector and starts the daemon that periodically
 * mirrors its resource-utilization information.
 */
protected void initialize() {
  // How often do we mirror the information from Collector
  final long period = conf.getLong("mapred.resourceutilization.mirrorperiod",
      DEFAULT_MIRROR_PERIOD);
  mirrorPeriod = period;
  // Make connection to the Collector before the mirroring loop starts.
  connect();
  mirrorDaemon = new Daemon(new MirrorRun());
  mirrorDaemon.start();
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:10,代码来源:UtilizationCollectorCached.java


注:本文中的org.apache.hadoop.util.Daemon.start方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。