This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.ApplicationId.newInstance. If you have been wondering what ApplicationId.newInstance does, how to call it, or where to find concrete examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.api.records.ApplicationId.
The following 15 code examples of ApplicationId.newInstance are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
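Before the examples, a minimal sketch of the method itself may help: ApplicationId.newInstance(long clusterTimestamp, int id) builds an ApplicationId from the ResourceManager's cluster start timestamp and a sequence number. The values below are arbitrary demo inputs, not taken from any example.

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ApplicationIdDemo {
  public static void main(String[] args) {
    // clusterTimestamp is normally the RM start time; 1234 is an arbitrary demo value
    ApplicationId appId = ApplicationId.newInstance(1234L, 5);
    System.out.println(appId);                        // application_1234_0005
    System.out.println(appId.getClusterTimestamp());  // 1234
    System.out.println(appId.getId());                // 5
  }
}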
Example 1: testDeletionofStagingOnKill
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test (timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
appMaster.init(conf);
//simulate the process being killed
MRAppMaster.MRAppMasterShutdownHook hook =
new MRAppMaster.MRAppMasterShutdownHook(appMaster);
hook.run();
verify(fs, times(0)).delete(stagingJobPath, true);
}
Example 2: testGetApplicationReport
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test(timeout = 10000)
public void testGetApplicationReport() throws YarnException, IOException {
Configuration conf = new Configuration();
final AHSClient client = new MockAHSClient();
client.init(conf);
client.start();
List<ApplicationReport> expectedReports =
((MockAHSClient) client).getReports();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationReport report = client.getApplicationReport(applicationId);
Assert.assertEquals(expectedReports.get(0), report);
Assert.assertEquals(expectedReports.get(0).getApplicationId().toString(),
    report.getApplicationId().toString());
client.stop();
}
Example 3: getNewApplication
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Override
public NewApplicationResponse getNewApplication() throws IOException {
NewApplicationResponse response = new NewApplicationResponse();
int priority = conf.getInt(YARN_APPLICATION_HPC_AM_PRIORITY,
DEFAULT_YARN_APPLICATION_HPC_AM_PRIORITY);
int amMemory = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB,
DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB);
int cpus = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES,
DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES);
SocketWrapper socket = SocketFactory.createSocket();
String hostName = socket.getHostName();
int port = socket.getPort();
int jobid = PBSCommandExecutor.submitAndGetPBSJobId(conf, priority,
amMemory, cpus, hostName, port);
SocketCache.addSocket(jobid, socket);
ApplicationId applicationId = ApplicationId.newInstance(
getClusterTimestamp(), jobid);
response.setApplicationId(applicationId);
response.setMaxCapability(getMaxCapability());
response.setMinCapability(getMinCapability());
return response;
}
Example 4: testMassiveWriteContainerHistory
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testMassiveWriteContainerHistory() throws IOException {
long mb = 1024 * 1024;
Runtime runtime = Runtime.getRuntime();
long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb;
int numContainers = 100000;
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
for (int i = 1; i <= numContainers; ++i) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
Example 5: toApplicationAttemptId
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
private static ApplicationAttemptId toApplicationAttemptId(
Iterator<String> it) throws NumberFormatException {
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
return appAttemptId;
}
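A quick usage sketch of this helper (input values assumed for illustration): the iterator must supply the cluster timestamp, the application id, and the attempt id, in that order.

// Illustrative only: the three numeric components of an attempt id.
Iterator<String> it = Arrays.asList("1234", "5", "1").iterator();
ApplicationAttemptId attemptId = toApplicationAttemptId(it);
// attemptId.toString() -> "appattempt_1234_0005_000001"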
Example 6: setUp
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
resourceMgrDelegate = mock(ResourceMgrDelegate.class);
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
clientCache = new ClientCache(conf, resourceMgrDelegate);
clientCache = spy(clientCache);
yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
yarnRunner = spy(yarnRunner);
submissionContext = mock(ApplicationSubmissionContext.class);
doAnswer(
new Answer<ApplicationSubmissionContext>() {
@Override
public ApplicationSubmissionContext answer(InvocationOnMock invocation)
throws Throwable {
return submissionContext;
}
}
).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class),
any(String.class), any(Credentials.class));
appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
jobId = TypeConverter.fromYarn(appId);
if (testWorkDir.exists()) {
FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
}
testWorkDir.mkdirs();
}
Example 7: testCheckAccessWithNullACLS
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testCheckAccessWithNullACLS() {
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,
true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,
ADMIN_USER);
ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
UserGroupInformation appOwner = UserGroupInformation
.createRemoteUser(APP_OWNER);
ApplicationId appId = ApplicationId.newInstance(1, 1);
//Application ACL is not added
//Application Owner should have all access even if Application ACL is not added
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
//Admin should have all access
UserGroupInformation adminUser = UserGroupInformation
.createRemoteUser(ADMIN_USER);
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
// A regular user should not have access
UserGroupInformation testUser1 = UserGroupInformation
.createRemoteUser(TESTUSER1);
assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP,
APP_OWNER, appId));
assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP,
APP_OWNER, appId));
}
Example 8: testDeletionofStagingOnUnregistrationFailure
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@SuppressWarnings("resource")
private void testDeletionofStagingOnUnregistrationFailure(
int maxAttempts, boolean shouldHaveDeleted) throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
TestMRApp appMaster = new TestMRApp(attemptId, null,
JobStateInternal.RUNNING, maxAttempts);
appMaster.crushUnregistration = true;
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
((RunningAppContext) appMaster.getContext()).resetIsLastAMRetry();
if (shouldHaveDeleted) {
Assert.assertTrue(appMaster.isLastAMRetry());
verify(fs).delete(stagingJobPath, true);
} else {
Assert.assertFalse(appMaster.isLastAMRetry());
verify(fs, never()).delete(stagingJobPath, true);
}
}
Example 9: testInvalidApp
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testInvalidApp() {
ApplicationId appId = ApplicationId.newInstance(0, MAX_APPS + 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
.path(appId.toString())
.queryParam("user.name", USERS[round])
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals("404 not found expected", Status.NOT_FOUND,
response.getClientResponseStatus());
}
Example 10: testRegisterNodeManagerRequest
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testRegisterNodeManagerRequest() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId,
ContainerState.RUNNING, Resource.newInstance(1024, 1, 2), "diagnostics",
0, Priority.newInstance(10), 1234);
List<NMContainerStatus> reports = Arrays.asList(containerReport);
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("1.1.1.1", 1000), 8080,
Resource.newInstance(1024, 1, 2), "NM-version-id", reports,
Arrays.asList(appId));
RegisterNodeManagerRequest requestProto =
new RegisterNodeManagerRequestPBImpl(
((RegisterNodeManagerRequestPBImpl) request).getProto());
Assert.assertEquals(containerReport, requestProto
.getNMContainerStatuses().get(0));
Assert.assertEquals(8080, requestProto.getHttpPort());
Assert.assertEquals("NM-version-id", requestProto.getNMVersion());
Assert.assertEquals(NodeId.newInstance("1.1.1.1", 1000),
requestProto.getNodeId());
Assert.assertEquals(Resource.newInstance(1024, 1, 2),
requestProto.getResource());
Assert.assertEquals(1, requestProto.getRunningApplications().size());
Assert.assertEquals(appId, requestProto.getRunningApplications().get(0));
}
Example 11: testContainerCleanedWhileCommitting
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_COMMIT_PENDING));
assertEquals("Task attempt is not in commit pending state", taImpl.getState(),
TaskAttemptState.COMMIT_PENDING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
eventHandler.internalError);
assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH,
taImpl.getLocality());
}
Example 12: testPoolLimits
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
ApplicationId appId = ApplicationId.newInstance(12345, 67);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 3);
JobId jobId = MRBuilderUtils.newJobId(appId, 8);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);
AppContext context = mock(AppContext.class);
CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
context);
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
containerLauncher.init(conf);
containerLauncher.start();
ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
// 10 different hosts
containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
for (int i = 0; i < 10; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 10);
Assert.assertEquals(10, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// 4 more different hosts, but thread pool size should be capped at 12
containerLauncher.expectedCorePoolSize = 12;
for (int i = 1; i <= 4; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host1" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 12);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Make some threads idle so that the remaining events are also processed.
containerLauncher.finishEventHandling = true;
waitForEvents(containerLauncher, 14);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
Example 13: testContainerTokenWithEpoch
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
/**
* This tests whether a containerId is serialized/deserialized with epoch.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerTokenWithEpoch(Configuration conf)
throws IOException, InterruptedException, YarnException {
LOG.info("Running test for serializing/deserializing containerIds");
NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
yarnCluster.getResourceManager().getRMContext()
.getNMTokenSecretManager();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
NodeId nodeId = nm.getNMContext().getNodeId();
// Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
// Creating a normal Container Token
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
Resource r = Resource.newInstance(1230, 2);
Token containerToken =
containerTokenSecretManager.createContainerToken(cId, nodeId, user, r,
Priority.newInstance(0), 0);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier();
byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
containerTokenIdentifier.readFields(dib);
Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
Assert.assertEquals(
cId.toString(), containerTokenIdentifier.getContainerID().toString());
Token nmToken =
nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);
testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken,
false);
List<ContainerId> containerIds = new LinkedList<ContainerId>();
containerIds.add(cId);
ContainerManagementProtocol proxy
= getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
GetContainerStatusesResponse res = proxy.getContainerStatuses(
GetContainerStatusesRequest.newInstance(containerIds));
Assert.assertNotNull(res.getContainerStatuses().get(0));
Assert.assertEquals(
cId, res.getContainerStatuses().get(0).getContainerId());
Assert.assertEquals(cId.toString(),
res.getContainerStatuses().get(0).getContainerId().toString());
}
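One note on the container id used above, based on how the test packs the value (an assumption drawn from the shift used, not from documentation cited here): the low 40 bits of the long carry the sequential container number and the bits above carry the RM restart epoch, so (5L << 40) | 3L denotes container 3 in epoch 5.

// Sketch of the assumed bit layout behind (5L << 40) | 3L:
long epoch = 5L;      // bumped when the RM restarts
long sequence = 3L;   // per-attempt sequential container number
long idWithEpoch = (epoch << 40) | sequence;
ContainerId cId = ContainerId.newContainerId(appAttemptId, idWithEpoch);
// The test then checks this full long survives the token's
// serialize/deserialize round trip intact.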
Example 14: createAppAttemptId
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
private ApplicationAttemptId createAppAttemptId(
long clusterTimeStamp, int id, int attemptId) {
ApplicationId appId = ApplicationId.newInstance(clusterTimeStamp, id);
return ApplicationAttemptId.newInstance(appId, attemptId);
}
Example 15: testLaunchFailedWhileKilling
import org.apache.hadoop.yarn.api.records.ApplicationId; // import the package/class the method depends on
@Test
public void testLaunchFailedWhileKilling() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), null);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
assertFalse(eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local node",
Locality.NODE_LOCAL, taImpl.getLocality());
}