当前位置: 首页>>代码示例>>Java>>正文


Java SystemClock类代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.util.SystemClock的典型用法代码示例。如果您正苦于以下问题:Java SystemClock类的具体用法?Java SystemClock怎么用?Java SystemClock使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


SystemClock类属于org.apache.hadoop.yarn.util包,在下文中一共展示了SystemClock类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: AngelApplicationMaster

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Creates the Angel application master service, recording the launch
 * parameters handed over by YARN and initializing the running-app context.
 *
 * @param conf                 job/cluster configuration
 * @param appName              human-readable application name
 * @param applicationAttemptId attempt id assigned by the RM
 * @param containerId          container hosting this AM
 * @param nmHost               node-manager host for this container
 * @param nmPort               node-manager RPC port
 * @param nmHttpPort           node-manager HTTP port
 * @param appSubmitTime        time the application was submitted
 * @param credentials          security credentials for the app
 */
public AngelApplicationMaster(Configuration conf, String appName,
    ApplicationAttemptId applicationAttemptId, ContainerId containerId, String nmHost, int nmPort,
    int nmHttpPort, long appSubmitTime, Credentials credentials)  {
  super(AngelApplicationMaster.class.getName());

  // Parameters supplied by the resource manager at launch.
  this.conf = conf;
  this.appName = appName;
  this.appAttemptId = applicationAttemptId;
  this.containerId = containerId;
  this.appSubmitTime = appSubmitTime;
  this.credentials = credentials;

  // Node-manager endpoint that hosts this AM container.
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;

  // Wall clock used to stamp the AM start time.
  this.clock = new SystemClock();
  this.startTime = clock.getTime();
  this.isCleared = false;

  appContext = new RunningAppContext(conf);
  angelApp = new App(appContext);
  lock = new ReentrantLock();
}
 
开发者ID:Tencent,项目名称:angel,代码行数:23,代码来源:AngelApplicationMaster.java

示例2: CGroupsHandlerImpl

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Builds a cgroups handler from the NodeManager configuration: resolves the
 * cgroup hierarchy prefix, mount settings and deletion timing, then runs
 * init().
 *
 * @param conf NodeManager configuration to read cgroup settings from
 * @param privilegedOperationExecutor executor used for privileged cgroup
 *        operations (e.g. mounting hierarchies)
 * @throws ResourceHandlerException if controller-path initialization fails
 */
public CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor
    privilegedOperationExecutor) throws ResourceHandlerException {
  // Normalize the configured hierarchy by stripping a leading AND trailing
  // slash. BUG FIX: the original second pattern was "$/", but '$' is the
  // end-of-input anchor so "$/" can never match anything; "/$" is the
  // correct regex for a trailing slash.
  this.cGroupPrefix = conf.get(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn")
      .replaceAll("^/", "").replaceAll("/$", "");
  this.enableCGroupMount = conf.getBoolean(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
  this.cGroupMountPath = conf.get(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
  // How long to keep retrying cgroup deletion, and the pause between tries.
  this.deleteCGroupTimeout = conf.getLong(
      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT,
      YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT);
  this.deleteCGroupDelay =
      conf.getLong(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY,
          YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY);
  this.controllerPaths = new HashMap<>();
  this.rwLock = new ReentrantReadWriteLock();
  this.privilegedOperationExecutor = privilegedOperationExecutor;
  this.clock = new SystemClock();

  init();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:CGroupsHandlerImpl.java

示例3: setUp

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Prepares a QueueManager backed by a mocked FairScheduler. isEmpty() is
 * overridden so tests can mark specific queues as non-empty through the
 * notEmptyQueues set.
 */
@Before
public void setUp() throws Exception {
  conf = new FairSchedulerConfiguration();

  // Mock scheduler exposing the config, allocation config and a real clock.
  FairScheduler mockScheduler = mock(FairScheduler.class);
  when(mockScheduler.getAllocationConfiguration())
      .thenReturn(new AllocationConfiguration(conf));
  when(mockScheduler.getConf()).thenReturn(conf);
  when(mockScheduler.getClock()).thenReturn(new SystemClock());

  notEmptyQueues = new HashSet<FSQueue>();
  queueManager = new QueueManager(mockScheduler) {
    @Override
    public boolean isEmpty(FSQueue queue) {
      // A queue counts as empty unless the test registered it.
      return !notEmptyQueues.contains(queue);
    }
  };
  FSQueueMetrics.forQueue("root", null, true, conf);
  queueManager.initialize(conf);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:TestQueueManager.java

示例4: testReportDiagnostics

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Diagnostics delivered via JobDiagnosticsUpdateEvent must appear in the job
 * report, both for a freshly created job and for one that has already been
 * killed. The two JobImpl constructions were previously duplicated inline;
 * they are extracted to a helper so the 18-argument call exists once.
 */
@Test
public void testReportDiagnostics() throws Exception {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  final String diagMsg = "some diagnostic message";
  final JobDiagnosticsUpdateEvent diagUpdateEvent =
      new JobDiagnosticsUpdateEvent(jobId, diagMsg);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);

  // A live job surfaces the diagnostic text in its report.
  JobImpl job = newDiagnosticsTestJob(jobId, mrAppMetrics, mockContext);
  job.handle(diagUpdateEvent);
  String diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));

  // A killed job must still retain diagnostics delivered afterwards.
  job = newDiagnosticsTestJob(jobId, mrAppMetrics, mockContext);
  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  job.handle(diagUpdateEvent);
  diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));
}

/** Builds a JobImpl wired with the minimal collaborators this test needs. */
private static JobImpl newDiagnosticsTestJob(JobId jobId,
    MRAppMetrics mrAppMetrics, AppContext mockContext) {
  return new JobImpl(jobId, Records
      .newRecord(ApplicationAttemptId.class), new Configuration(),
      mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      new SystemClock(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:34,代码来源:TestJobImpl.java

示例5: createCommitterEventHandler

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
private static CommitterEventHandler createCommitterEventHandler(
    Dispatcher dispatcher, OutputCommitter committer) {
  final SystemClock clock = new SystemClock();
  AppContext appContext = mock(AppContext.class);
  when(appContext.getEventHandler()).thenReturn(
      dispatcher.getEventHandler());
  when(appContext.getClock()).thenReturn(clock);
  RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
    @Override
    public long getLastHeartbeatTime() {
      return clock.getTime();
    }
    @Override
    public void runOnNextHeartbeat(Runnable callback) {
      callback.run();
    }
  };
  ApplicationAttemptId id = 
    ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
  when(appContext.getApplicationAttemptId()).thenReturn(id);
  CommitterEventHandler handler =
      new CommitterEventHandler(appContext, committer, heartbeatHandler);
  dispatcher.register(CommitterEventType.class, handler);
  return handler;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:TestJobImpl.java

示例6: StubbedJob

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Test-only JobImpl whose JOB_INIT transition is replaced so the number of
 * splits is controlled by the test and NEW may move directly to INITED or
 * FAILED.
 *
 * @param jobId                job id under test
 * @param applicationAttemptId attempt id passed through to JobImpl
 * @param conf                 job configuration
 * @param eventHandler         handler receiving job events
 * @param newApiCommitter      whether the new-API output committer is used
 * @param user                 submitting user name
 * @param numSplits            split count injected into the init transition
 * @param appContext           application context passed through to JobImpl
 */
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
    String user, int numSplits, AppContext appContext) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, appContext, null, null);

  // Re-register NEW -> {INITED, FAILED} on JOB_INIT using the stubbed
  // transition so the test controls the split count.
  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
        EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
        JobEventType.JOB_INIT,
        // This is abusive.
        initTransition);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  localStateMachine = localFactory.make(this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:TestJobImpl.java

示例7: testCreateDirsWithFileSystemBecomingAvailBeforeTimeout

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * History-directory creation must succeed when HDFS leaves safe mode before
 * the creation timeout expires. A background thread releases safe mode
 * after 500ms while the main thread blocks in testCreateHistoryDirs.
 */
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
    throws Exception {
  // Enter safe mode so directory creation is initially blocked.
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  new Thread() {
    @Override
    public void run() {
      try {
        // Release safe mode shortly after, well inside the timeout window.
        Thread.sleep(500);
        dfsCluster.getFileSystem().setSafeMode(
            HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        // BUG FIX: the original asserted isInSafeMode() was still true
        // right after SAFEMODE_LEAVE, contradicting the call above; the
        // filesystem must now be OUT of safe mode.
        Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestHistoryFileManager.java

示例8: testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * History-directory creation must give up with a YarnRuntimeException when
 * HDFS never leaves safe mode before the (clock-driven) timeout elapses.
 */
@Test(expected = YarnRuntimeException.class)
public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout()
    throws Exception {
  // Keep HDFS in safe mode for the whole test.
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());

  // Controlled clock lets the test advance time instead of really waiting.
  final ControlledClock clock = new ControlledClock(new SystemClock());
  clock.setTime(1);

  Runnable advanceClock = new Runnable() {
    @Override
    public void run() {
      try {
        Thread.sleep(500);
        // Jump past the creation timeout while safe mode is still on.
        clock.setTime(3000);
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  };
  new Thread(advanceClock).start();

  testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestHistoryFileManager.java

示例9: testEmptyChildQueues

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * A freshly created leaf queue must report an empty (but non-null) child
 * queue collection through FairSchedulerQueueInfo.
 */
@Test
public void testEmptyChildQueues() throws Exception {
  FairSchedulerConfiguration schedulerConf = new FairSchedulerConfiguration();

  // Mock scheduler with just enough stubbing for QueueManager/QueueInfo.
  FairScheduler mockScheduler = mock(FairScheduler.class);
  when(mockScheduler.getAllocationConfiguration())
      .thenReturn(new AllocationConfiguration(schedulerConf));
  when(mockScheduler.getConf()).thenReturn(schedulerConf);
  when(mockScheduler.getClusterResource())
      .thenReturn(Resource.newInstance(1, 1));
  when(mockScheduler.getClock()).thenReturn(new SystemClock());

  QueueManager manager = new QueueManager(mockScheduler);
  manager.initialize(schedulerConf);

  // Create the leaf queue on demand; it has no children of its own.
  FSQueue leaf = manager.getLeafQueue("test", true);
  FairSchedulerQueueInfo info =
      new FairSchedulerQueueInfo(leaf, mockScheduler);
  Collection<FairSchedulerQueueInfo> children = info.getChildQueues();
  Assert.assertNotNull(children);
  Assert.assertEquals("Child QueueInfo was not empty", 0, children.size());
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:22,代码来源:TestFairSchedulerQueueInfo.java

示例10: createCommitterEventHandler

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
private static CommitterEventHandler createCommitterEventHandler(
    Dispatcher dispatcher, OutputCommitter committer) {
  final SystemClock clock = new SystemClock();
  AppContext appContext = mock(AppContext.class);
  when(appContext.getEventHandler()).thenReturn(
      dispatcher.getEventHandler());
  when(appContext.getClock()).thenReturn(clock);
  RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
    @Override
    public long getLastHeartbeatTime() {
      return clock.getTime();
    }
    @Override
    public void runOnNextHeartbeat(Runnable callback) {
      callback.run();
    }
  };
  ApplicationAttemptId id = ApplicationAttemptId.fromString(
      "appattempt_1234567890000_0001_0");
  when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
  when(appContext.getApplicationAttemptId()).thenReturn(id);
  CommitterEventHandler handler =
      new CommitterEventHandler(appContext, committer, heartbeatHandler);
  dispatcher.register(CommitterEventType.class, handler);
  return handler;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:27,代码来源:TestJobImpl.java

示例11: StubbedJob

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Test-only JobImpl whose JOB_INIT transition is replaced so the number of
 * splits is controlled by the test and NEW may move directly to INITED or
 * FAILED. This variant passes no AppContext to the superclass.
 *
 * @param jobId                job id under test
 * @param applicationAttemptId attempt id passed through to JobImpl
 * @param conf                 job configuration
 * @param eventHandler         handler receiving job events
 * @param newApiCommitter      whether the new-API output committer is used
 * @param user                 submitting user name
 * @param numSplits            split count injected into the init transition
 */
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler,
    boolean newApiCommitter, String user, int numSplits) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, null, null, null);

  // Re-register NEW -> {INITED, FAILED} on JOB_INIT using the stubbed
  // transition so the test controls the split count.
  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
        EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
        JobEventType.JOB_INIT,
        // This is abusive.
        initTransition);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  localStateMachine = localFactory.make(this);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:21,代码来源:TestJobImpl.java

示例12: FairScheduler

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Creates the FairScheduler service with its wall clock, allocation-file
 * loader, queue manager and max-running-apps enforcer.
 */
public FairScheduler() {
  super(FairScheduler.class.getName());
  clock = new SystemClock();
  // Service that loads (and reloads) the fair-scheduler allocation file.
  allocsLoader = new AllocationFileLoaderService();
  queueMgr = new QueueManager(this);
  maxRunningEnforcer = new MaxRunningAppsEnforcer(this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:FairScheduler.java

示例13: getMockMapTask

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Constructs a two-partition MapTaskImpl with mocked collaborators for
 * recovery tests.
 *
 * @param clusterTimestamp timestamp used to build the application id
 * @param eh event handler the task reports to
 * @return a map task wired to mock listeners, metrics and app context
 */
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {
  ApplicationId applicationId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId mockJobId = MRBuilderUtils.newJobId(applicationId, 1);

  int numPartitions = 2;
  int attemptNumber = 3;

  Path jobConfPath = mock(Path.class);
  JobConf jobConf = new JobConf();
  TaskAttemptListener listener = mock(TaskAttemptListener.class);
  Token<JobTokenIdentifier> token =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials creds = null;
  Clock wallClock = new SystemClock();
  MRAppMetrics mockMetrics = mock(MRAppMetrics.class);

  // Minimum container size stub (1000 MB); not passed to the constructor
  // but kept to mirror the original fixture setup.
  Resource minRequirements = mock(Resource.class);
  when(minRequirements.getMemory()).thenReturn(1000);

  ClusterInfo mockClusterInfo = mock(ClusterInfo.class);
  AppContext mockAppContext = mock(AppContext.class);
  when(mockAppContext.getClusterInfo()).thenReturn(mockClusterInfo);

  TaskSplitMetaInfo splitMetaInfo = mock(TaskSplitMetaInfo.class);
  return new MapTaskImpl(mockJobId, numPartitions,
      eh, jobConfPath, jobConf,
      splitMetaInfo, listener, token, creds, wallClock,
      attemptNumber, mockMetrics, mockAppContext);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:31,代码来源:TestRecovery.java

示例14: setup

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * Initializes shared fixtures for each TaskImpl test: an inline dispatcher,
 * mocked collaborators, a real system clock, and job/application ids.
 */
@Before 
@SuppressWarnings("unchecked")
public void setup() {
   dispatcher = new InlineDispatcher();
  
  // Bump the per-test start counter.
  ++startCount;
  
  conf = new JobConf();
  taskAttemptListener = mock(TaskAttemptListener.class);
  jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
  remoteJobConfFile = mock(Path.class);
  credentials = null;
  clock = new SystemClock();
  metrics = mock(MRAppMetrics.class);  
  dataLocations = new String[1];
  
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

  jobId = Records.newRecord(JobId.class);
  jobId.setId(1);
  jobId.setAppId(appId);
  appContext = mock(AppContext.class);

  // Split meta info reports the (single, placeholder) data location array.
  taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
  
  taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:TestTaskImpl.java

示例15: testTimeout

import org.apache.hadoop.yarn.util.SystemClock; //导入依赖的package包/类
/**
 * A registered attempt that never heartbeats must be reported as timed out:
 * with a 10ms timeout and 10ms check interval, a 100ms sleep gives the
 * handler time to emit its events (two are expected here).
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testTimeout() throws InterruptedException {
  EventHandler eventHandler = mock(EventHandler.class);
  TaskHeartbeatHandler heartbeatHandler =
      new TaskHeartbeatHandler(eventHandler, new SystemClock(), 1);

  Configuration config = new Configuration();
  config.setInt(MRJobConfig.TASK_TIMEOUT, 10); //10 ms
  config.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms

  heartbeatHandler.init(config);
  heartbeatHandler.start();
  try {
    // Register one attempt and never ping it again.
    ApplicationId appId = ApplicationId.newInstance(0l, 5);
    JobId jobId = MRBuilderUtils.newJobId(appId, 4);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 2);
    heartbeatHandler.register(attemptId);

    Thread.sleep(100);
    verify(eventHandler, times(2)).handle(any(Event.class));
  } finally {
    heartbeatHandler.stop();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:28,代码来源:TestTaskHeartbeatHandler.java


注:本文中的org.apache.hadoop.yarn.util.SystemClock类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。