This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.app.AppContext. If you are wondering what AppContext is for and how to use it, the curated examples below should help.
The AppContext class belongs to the org.apache.hadoop.mapreduce.v2.app package. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: createStubbedJob
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
private static StubbedJob createStubbedJob(Configuration conf,
Dispatcher dispatcher, int numSplits, AppContext appContext) {
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
if (appContext == null) {
appContext = mock(AppContext.class);
when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
}
StubbedJob job = new StubbedJob(jobId,
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
dispatcher.register(JobEventType.class, job);
EventHandler mockHandler = mock(EventHandler.class);
dispatcher.register(TaskEventType.class, mockHandler);
dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
mockHandler);
dispatcher.register(JobFinishEvent.Type.class, mockHandler);
return job;
}
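A short usage sketch, not from the original source: assuming YARN's DrainDispatcher test utility (org.apache.hadoop.yarn.event.DrainDispatcher), this drives the stubbed job through its INIT transition.
// Hedged sketch: the DrainDispatcher and the JOB_INIT flow are assumptions
// about how such a stub is typically driven, not part of the original.
Configuration conf = new Configuration();
DrainDispatcher dispatcher = new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
StubbedJob job = createStubbedJob(conf, dispatcher, 2, null); // null => mocked AppContext
job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
dispatcher.await(); // let queued events drain before asserting on job state
dispatcher.stop();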
Example 2: DefaultSpeculator (constructor)
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public DefaultSpeculator
(Configuration conf, AppContext context,
TaskRuntimeEstimator estimator, Clock clock) {
super(DefaultSpeculator.class.getName());
this.conf = conf;
this.context = context;
this.estimator = estimator;
this.clock = clock;
this.eventHandler = context.getEventHandler();
this.soonestRetryAfterNoSpeculate =
conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
this.soonestRetryAfterSpeculate =
conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
this.proportionRunningTasksSpeculatable =
conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
this.proportionTotalTasksSpeculatable =
conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
this.minimumAllowedSpeculativeTasks =
conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
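The constructor reads five speculation knobs from the job configuration. Below is a hedged sketch of tuning them before constructing the speculator; the keys are the MRJobConfig constants used above, but the values are illustrative only, not recommendations.
Configuration conf = new Configuration();
conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);  // cap: 10% of running tasks
conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.01);   // cap: 1% of all tasks
conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 10); // floor on speculative attempts
conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 1000L);  // ms
conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 15000L);    // ms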
Example 3: JobCounterInfo
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public JobCounterInfo(AppContext ctx, Job job) {
getCounters(ctx, job);
counterGroup = new ArrayList<CounterGroupInfo>();
this.id = MRApps.toString(job.getID());
if (total != null) {
for (CounterGroup g : total) {
if (g != null) {
CounterGroup mg = map == null ? null : map.getGroup(g.getName());
CounterGroup rg = reduce == null ? null : reduce
.getGroup(g.getName());
CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g,
mg, rg);
counterGroup.add(cginfo);
}
}
}
}
Example 4: configureServlets
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 2, 1);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
Example 5: createAppContext
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
private static AppContext createAppContext() {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 1);
Job job = mock(Job.class);
@SuppressWarnings("rawtypes")
EventHandler eventHandler = mock(EventHandler.class);
AppContext ctx = mock(AppContext.class);
when(ctx.getApplicationID()).thenReturn(appId);
when(ctx.getApplicationAttemptId()).thenReturn(attemptId);
when(ctx.getJob(isA(JobId.class))).thenReturn(job);
when(ctx.getClusterInfo()).thenReturn(
new ClusterInfo(Resource.newInstance(10240, 1, 0)));
when(ctx.getEventHandler()).thenReturn(eventHandler);
return ctx;
}
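A hypothetical follow-up (not in the original) showing what the stubbing above buys you in a test; the expected values mirror the when(...) clauses:
AppContext ctx = createAppContext();
assertEquals(1, ctx.getApplicationID().getId());
assertEquals(1, ctx.getApplicationAttemptId().getAttemptId());
// The isA(JobId.class) stub answers for any JobId:
JobId someJobId = TypeConverter.toYarn(JobID.forName("job_1234567890000_0001"));
assertNotNull(ctx.getJob(someJobId));
assertEquals(10240,
    ctx.getClusterInfo().getMaxContainerCapability().getMemory());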
Example 6: verifyAMInfoXML
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public void verifyAMInfoXML(String xml, AppContext ctx)
throws JSONException, Exception {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("info");
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyAMInfoGeneric(ctx,
WebServicesTestUtils.getXmlString(element, "appId"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlString(element, "name"),
WebServicesTestUtils.getXmlLong(element, "startedOn"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"));
}
}
Example 7: testSingleTaskCounterView
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
@Test public void testSingleTaskCounterView() {
AppContext appContext = new MockAppContext(0, 1, 1, 2);
Map<String, String> params = getTaskParams(appContext);
params.put(AMParams.COUNTER_GROUP,
"org.apache.hadoop.mapreduce.FileSystemCounter");
params.put(AMParams.COUNTER_NAME, "HDFS_WRITE_OPS");
// remove counters from one task attempt
// to test handling of missing counters
TaskId taskID = MRApps.toTaskID(params.get(AMParams.TASK_ID));
Job job = appContext.getJob(taskID.getJobId());
Task task = job.getTask(taskID);
TaskAttempt attempt = task.getAttempts().values().iterator().next();
attempt.getReport().setCounters(null);
WebAppTests.testPage(SingleCounterPage.class, AppContext.class,
appContext, params);
}
Example 8: createAppContext
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job) {
AppContext context = mock(AppContext.class);
ApplicationId appId = appAttemptId.getApplicationId();
when(context.getApplicationID()).thenReturn(appId);
when(context.getApplicationAttemptId()).thenReturn(appAttemptId);
when(context.getJob(isA(JobId.class))).thenReturn(job);
when(context.getClusterInfo()).thenReturn(
new ClusterInfo(Resource.newInstance(10240, 1)));
when(context.getEventHandler()).thenReturn(new EventHandler() {
@Override
public void handle(Event event) {
// Only capture interesting events.
if (event instanceof TaskAttemptContainerAssignedEvent) {
events.add((TaskAttemptContainerAssignedEvent) event);
} else if (event instanceof TaskAttemptKillEvent) {
taskAttemptKillEvents.add((TaskAttemptKillEvent)event);
} else if (event instanceof JobUpdatedNodesEvent) {
jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event);
} else if (event instanceof JobEvent) {
jobEvents.add((JobEvent)event);
}
}
});
return context;
}
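The handler above only captures the event types the test cares about; everything else is dropped. The capture lists are assumed to be static fields on the enclosing test class. A hedged sketch of those declarations, with only the names taken from the handler body:
// Assumed declarations (hypothetical; types and wrapping are guesses):
static final List<TaskAttemptContainerAssignedEvent> events =
    Collections.synchronizedList(
        new ArrayList<TaskAttemptContainerAssignedEvent>());
static final List<TaskAttemptKillEvent> taskAttemptKillEvents =
    Collections.synchronizedList(new ArrayList<TaskAttemptKillEvent>());
static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents =
    Collections.synchronizedList(new ArrayList<JobUpdatedNodesEvent>());
static final List<JobEvent> jobEvents =
    Collections.synchronizedList(new ArrayList<JobEvent>());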
Example 9: configureServlets
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 1, 1);
JobHistory jobHistoryService = new JobHistory();
HistoryContext historyContext = (HistoryContext) jobHistoryService;
webApp = new HsWebApp(historyContext);
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
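A hedged usage sketch, assuming this Guice module is installed inside a JerseyTest-style harness (as history-server web-service tests typically are): the bound services can then be queried in-process.
// resource() is provided by com.sun.jersey.test.framework.JerseyTest in the
// assumed harness; the path reflects the history server's REST root.
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());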
Example 10: testTransitionsAtFailed
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
@Test
public void testTransitionsAtFailed() throws IOException {
Configuration conf = new Configuration();
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = mock(OutputCommitter.class);
doThrow(new IOException("forcefail"))
.when(committer).setupJob(any(JobContext.class));
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext = mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
assertJobState(job, JobStateInternal.FAILED);
Assert.assertEquals(JobState.RUNNING, job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.FAILED, job.getState());
dispatcher.stop();
commitHandler.stop();
}
Example 11: createCommitterEventHandler
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
private static CommitterEventHandler createCommitterEventHandler(
Dispatcher dispatcher, OutputCommitter committer) {
final SystemClock clock = new SystemClock();
AppContext appContext = mock(AppContext.class);
when(appContext.getEventHandler()).thenReturn(
dispatcher.getEventHandler());
when(appContext.getClock()).thenReturn(clock);
RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
@Override
public long getLastHeartbeatTime() {
return clock.getTime();
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
callback.run();
}
};
ApplicationAttemptId id =
ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
when(appContext.getApplicationAttemptId()).thenReturn(id);
CommitterEventHandler handler =
new CommitterEventHandler(appContext, committer, heartbeatHandler);
dispatcher.register(CommitterEventType.class, handler);
return handler;
}
Example 12: StubbedJob
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
String user, int numSplits, AppContext appContext) {
super(jobId, applicationAttemptId, conf, eventHandler,
null, new JobTokenSecretManager(), new Credentials(),
new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
MRAppMetrics.create(), null, newApiCommitter, user,
System.currentTimeMillis(), null, appContext, null, null);
initTransition = getInitTransition(numSplits);
localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
JobEventType.JOB_INIT,
// This is abusive.
initTransition);
// This "this leak" is okay because the retained pointer is in an
// instance variable.
localStateMachine = localFactory.make(this);
}
Example 13: testLogsView1
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
@Test
public void testLogsView1() throws IOException {
LOG.info("HsLogsPage");
Injector injector =
WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
verify(spyPw).write("Cannot get container logs without a ContainerId");
verify(spyPw).write("Cannot get container logs without a NodeId");
verify(spyPw).write("Cannot get container logs without an app owner");
}
Example 14: LocalContainerLauncher
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public LocalContainerLauncher(AppContext context,
TaskUmbilicalProtocol umbilical) {
super(LocalContainerLauncher.class.getName());
this.context = context;
this.umbilical = umbilical;
// umbilical: MRAppMaster creates (taskAttemptListener), passes to us
// (TODO/FIXME: pointless to use RPC to talk to self; should create
// LocalTaskAttemptListener or similar: implement umbilical protocol
// but skip RPC stuff)
try {
curFC = FileContext.getFileContext(curDir.toURI());
} catch (UnsupportedFileSystemException ufse) {
LOG.error("Local filesystem " + curDir.toURI().toString()
+ " is unsupported?? (should never happen)");
}
// Save list of files/dirs that are supposed to be present so can delete
// any extras created by one task before starting subsequent task. Note
// that there's no protection against deleted or renamed localization;
// users who do that get what they deserve (and will have to disable
// uberization in order to run correctly).
File[] curLocalFiles = curDir.listFiles();
localizedFiles = new HashSet<File>(curLocalFiles.length);
for (int j = 0; j < curLocalFiles.length; ++j) {
localizedFiles.add(curLocalFiles[j]);
}
// Relocalization note/future FIXME (per chrisdo, 20110315): At moment,
// full localization info is in AppSubmissionContext passed from client to
// RM and then to NM for AM-container launch: no difference between AM-
// localization and MapTask- or ReduceTask-localization, so can assume all
// OK. Longer-term, will need to override uber-AM container-localization
// request ("needed resources") with union of regular-AM-resources + task-
// resources (and, if maps and reduces ever differ, then union of all three
// types), OR will need localizer service/API that uber-AM can request
// after running (e.g., "localizeForTask()" or "localizeForMapTask()").
}
Example 15: MapTaskAttemptImpl
import org.apache.hadoop.mapreduce.v2.app.AppContext; // import the required package/class
public MapTaskAttemptImpl(TaskId taskId, int attempt,
EventHandler eventHandler, Path jobFile,
int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext) {
super(taskId, attempt, eventHandler,
taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
jobToken, credentials, clock, appContext);
this.splitInfo = splitInfo;
}