

Java TaskSplitMetaInfo Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo. If you are unsure what TaskSplitMetaInfo does or how to use it, the curated class examples below should help.


The TaskSplitMetaInfo class belongs to the org.apache.hadoop.mapreduce.split.JobSplit package. Fifteen code examples of the class are shown below, ordered by popularity.
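Before the examples, here is a minimal, self-contained sketch of the small API surface these snippets rely on. It is illustrative only: the host names and sizes are made up, but the constructor and getters match those used in Examples 2, 4, 9, and 10 below.

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;

public class TaskSplitMetaInfoSketch {
  public static void main(String[] args) {
    // Hosts holding the split data, the split's start offset, and its length in bytes
    String[] hosts = { "host1", "host2", "host3" };
    TaskSplitMetaInfo meta =
        new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);

    System.out.println(meta.getInputDataLength());  // 134217728
    System.out.println(meta.getLocations().length); // 3

    // The split index points at the serialized split inside the job's
    // split file; it is what gets shipped to the task (see Example 2)
    TaskSplitIndex index = meta.getSplitIndex();
    System.out.println(index.getStartOffset());
  }
}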

Example 1: createMapTasks

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
private void createMapTasks(JobImpl job, long inputLength,
                            TaskSplitMetaInfo[] splits) {
  for (int i=0; i < job.numMapTasks; ++i) {
    TaskImpl task =
        new MapTaskImpl(job.jobId, i,
            job.eventHandler, 
            job.remoteJobConfFile, 
            job.conf, splits[i], 
            job.taskAttemptListener, 
            job.jobToken, job.jobCredentials,
            job.clock,
            job.applicationAttemptId.getAttemptId(),
            job.metrics, job.appContext);
    job.addTask(task);
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source: JobImpl.java

Example 2: createRemoteTask

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
@Override
protected Task createRemoteTask() {
  TaskSplitIndex[] splitIndex = new TaskSplitIndex[splitInfos.length];
  int i = 0;
  for (TaskSplitMetaInfo splitInfo : splitInfos) {
    splitIndex[i] = splitInfo.getSplitIndex();
    i++;
  }
  // YARN doesn't have the concept of slots per task; set it to 1.
  MapTask mapTask =
      new MultiMapTask("", TypeConverter.fromYarn(getID()), partition,
          splitIndex, 1);
  mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
  mapTask.setConf(conf);
  mapTask.setTaskType(TaskType.MULTI_MAP);
  return mapTask;
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 21, Source: MultiMapTaskAttemptImpl.java

Example 3: createMapTasks

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
private void createMapTasks(JobImpl job, long inputLength,
                            TaskSplitMetaInfo[] splits) {
  for (int i=0; i < job.numMapTasks; ++i) {
    TaskImpl task =
        new MapTaskImpl(job.jobId, i,
            job.eventHandler,
            job.remoteJobConfFile,
            job.conf, splits[i],
            job.taskAttemptListener,
            job.jobToken, job.jobCredentials,
            job.clock,
            job.applicationAttemptId.getAttemptId(),
            job.metrics, job.appContext);
    job.addTask(task);
    LOG.info("split info for task " + i + ": "
        + splits[i].getLocations().length + " split locations");
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 20, Source: JobImpl.java

Example 4: SplitInfo

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
SplitInfo(TaskSplitMetaInfo taskSplitMetaInfo, String[] topologyPath) {
  this.taskSplitMetaInfo = taskSplitMetaInfo;
  this.length = taskSplitMetaInfo.getInputDataLength();
  this.hosts  = taskSplitMetaInfo.getLocations();

  // allow a null or empty topologyPath before comparing lengths
  assert(topologyPath == null || topologyPath.length == 0
      || hosts.length == topologyPath.length);

  // if this fs does not have any rack information, use the default rack
  if (topologyPath == null || topologyPath.length == 0) {
    topologyPath = new String[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
      topologyPath[i] =
          new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK).toString();
    }
  }

  // the topology paths have the host name as the last component; strip it
  this.racks = new String[hosts.length];
  for (int i = 0; i < racks.length; i++) {
    this.racks[i] = new NodeBase(topologyPath[i]).getNetworkLocation();
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 25, Source: TaskDataProvision.java
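The rack handling in this constructor leans on org.apache.hadoop.net.NodeBase. As an illustration of what the two calls above produce (the host name is made up), a round trip looks like this:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class NodeBaseDemo {
  public static void main(String[] args) {
    // No rack info available: synthesize a topology path under the default rack
    String path = new NodeBase("host1", NetworkTopology.DEFAULT_RACK).toString();
    System.out.println(path); // /default-rack/host1

    // Parse the path back and strip the host component, keeping only the rack
    System.out.println(new NodeBase(path).getNetworkLocation()); // /default-rack
  }
}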

Example 5: MapTaskAttemptImpl

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
public MapTaskAttemptImpl(TaskId taskId, int attempt, 
    EventHandler eventHandler, Path jobFile, 
    int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
    TaskAttemptListener taskAttemptListener, 
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  super(taskId, attempt, eventHandler, 
      taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
      jobToken, credentials, clock, appContext);
  this.splitInfo = splitInfo;
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: MapTaskAttemptImpl.java

Example 6: MapTaskImpl

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: MapTaskImpl.java

Example 7: createSplits

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  TaskSplitMetaInfo[] allTaskSplitMetaInfo;
  try {
    allTaskSplitMetaInfo = SplitMetaInfoReader.readSplitMetaInfo(
        job.oldJobId, job.fs, 
        job.conf, 
        job.remoteJobSubmitDir);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
  return allTaskSplitMetaInfo;
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: JobImpl.java
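For context: SplitMetaInfoReader.readSplitMetaInfo reads the split meta-info file that the job client wrote into the job submission directory at submit time, which is why the ApplicationMaster can build its map tasks here without recomputing the input splits.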

Example 8: getMockMapTask

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

    ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    int partitions = 2;

    Path remoteJobConfFile = mock(Path.class);
    JobConf conf = new JobConf();
    TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
    Token<JobTokenIdentifier> jobToken =
        (Token<JobTokenIdentifier>) mock(Token.class);
    Credentials credentials = null;
    Clock clock = new SystemClock();
    int appAttemptId = 3;
    MRAppMetrics metrics = mock(MRAppMetrics.class);
    Resource minContainerRequirements = mock(Resource.class);
    when(minContainerRequirements.getMemory()).thenReturn(1000);

    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClusterInfo()).thenReturn(clusterInfo);

    TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
    MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
        eh, remoteJobConfFile, conf,
        taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
        appAttemptId, metrics, appContext);
    return mapTask;
  }
 
Developer: naver, Project: hadoop, Lines of code: 31, Source: TestRecovery.java

Example 9: testSingleRackRequest

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
@Test
public void testSingleRackRequest() throws Exception {
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = new String[3];
  hosts[0] = "host1";
  hosts[1] = "host2";
  hosts[2] = "host3";
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);

  TaskAttemptImpl mockTaskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);

  rct.transition(mockTaskAttempt, mockTAEvent);

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(arg.capture());
  if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  ContainerRequestEvent cre =
      (ContainerRequestEvent) arg.getAllValues().get(1);
  String[] requestedRacks = cre.getRacks();
  //Only a single occurrence of /DefaultRack
  assertEquals(1, requestedRacks.length);
}
 
Developer: naver, Project: hadoop, Lines of code: 31, Source: TestTaskAttempt.java

Example 10: testHostResolveAttempt

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
@Test
public void testHostResolveAttempt() throws Exception {
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = new String[3];
  hosts[0] = "192.168.1.1";
  hosts[1] = "host2";
  hosts[2] = "host3";
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);

  TaskAttemptImpl mockTaskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptImpl spyTa = spy(mockTaskAttempt);
  when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
  spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());

  TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
  rct.transition(spyTa, mockTAEvent);
  verify(spyTa).resolveHost(hosts[0]);
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(arg.capture());
  if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  Map<String, Boolean> expected = new HashMap<String, Boolean>();
  expected.put("host1", true);
  expected.put("host2", true);
  expected.put("host3", true);
  ContainerRequestEvent cre =
      (ContainerRequestEvent) arg.getAllValues().get(1);
  String[] requestedHosts = cre.getHosts();
  for (String h : requestedHosts) {
    expected.remove(h);
  }
  assertEquals(0, expected.size());
}
 
Developer: naver, Project: hadoop, Lines of code: 40, Source: TestTaskAttempt.java

Example 11: createMapTaskAttemptImplForTest

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          taskSplitMetaInfo, jobConf, taListener, null,
          null, clock, null);
  return taImpl;
}
 
Developer: naver, Project: hadoop, Lines of code: 15, Source: TestTaskAttempt.java

Example 12: getInitTransition

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
private static InitTransition getInitTransition(final int numSplits) {
  InitTransition initTransition = new InitTransition() {
    @Override
    protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
      TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numSplits];
      for (int i = 0; i < numSplits; ++i) {
        splits[i] = new TaskSplitMetaInfo();
      }
      return splits;
    }
  };
  return initTransition;
}
 
Developer: naver, Project: hadoop, Lines of code: 14, Source: TestJobImpl.java

Example 13: setup

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
@Before 
@SuppressWarnings("unchecked")
public void setup() {
  dispatcher = new InlineDispatcher();
  
  ++startCount;
  
  conf = new JobConf();
  taskAttemptListener = mock(TaskAttemptListener.class);
  jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
  remoteJobConfFile = mock(Path.class);
  credentials = null;
  clock = new SystemClock();
  metrics = mock(MRAppMetrics.class);  
  dataLocations = new String[1];
  
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

  jobId = Records.newRecord(JobId.class);
  jobId.setId(1);
  jobId.setAppId(appId);
  appContext = mock(AppContext.class);

  taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
  
  taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source: TestTaskImpl.java

Example 14: createSplits

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[maps];
  for (int i = 0; i < maps; i++) {
    splits[i] = new TaskSplitMetaInfo();
  }
  return splits;
}
 
Developer: naver, Project: hadoop, Lines of code: 9, Source: MRApp.java

Example 15: MapTaskRunnable

import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; // import the required package/class
public MapTaskRunnable(TaskSplitMetaInfo info, int taskId, JobID jobId,
    Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
  this.info = info;
  this.taskId = taskId;
  this.mapOutputFiles = mapOutputFiles;
  this.jobId = jobId;
  this.localConf = new JobConf(job);
}
 
Developer: naver, Project: hadoop, Lines of code: 9, Source: LocalJobRunner.java


Note: The org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo examples in this article were compiled from open-source code hosted on GitHub and similar platforms. The snippets are excerpted from projects contributed by their original authors, who retain copyright; distribution and use are subject to each project's License. Do not reproduce without permission.