This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.Priority.newInstance. If you have been wondering what Priority.newInstance does, how to call it, or where to find real-world usages, the curated code samples below may help. You can also read more about its enclosing class, org.apache.hadoop.yarn.api.records.Priority.
The following lists 15 code examples of Priority.newInstance, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
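Before the examples, here is a minimal, self-contained sketch of what the method does (the variable names and the 1024 MB / 1 vcore capability are illustrative, not taken from any example below): Priority.newInstance wraps a plain int, and the resulting Priority is usually paired with a Resource capability when building a ContainerRequest for the ResourceManager.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

// A Priority is just a record wrapping a single int.
Priority priority = Priority.newInstance(0);
// Pair it with a capability to form a container ask (node/rack preferences left null here).
Resource capability = Resource.newInstance(1024, 1);
ContainerRequest ask = new ContainerRequest(capability, null, null, priority);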
Example 1: handle
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Override
public void handle(ContainerAllocatorEvent event) {
ContainerId cId =
ContainerId.newContainerId(getContext().getApplicationAttemptId(),
containerCount++);
NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
Resource resource = Resource.newInstance(1234, 2, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
Container container = Container.newInstance(cId, nodeId,
NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
JobID id = TypeConverter.fromYarn(applicationId);
JobId jobId = TypeConverter.toYarn(id);
getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.REDUCE,
100)));
getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.MAP,
100)));
getContext().getEventHandler().handle(
new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
container, null));
}
Example 2: createContainerToken
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public static Token createContainerToken(ContainerId cId, long rmIdentifier,
NodeId nodeId, String user,
NMContainerTokenSecretManager containerTokenSecretManager,
LogAggregationContext logAggregationContext)
throws IOException {
Resource r = BuilderUtils.newResource(1024, 1);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, nodeId.toString(), user, r,
System.currentTimeMillis() + 100000L, 123, rmIdentifier,
Priority.newInstance(0), 0, logAggregationContext);
Token containerToken =
BuilderUtils
.newContainerToken(nodeId, containerTokenSecretManager
.retrievePassword(containerTokenIdentifier),
containerTokenIdentifier);
return containerToken;
}
Example 3: setupContainerAskForRM
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
/**
* Set up the request that will be sent to the RM for the container ask.
*
* @return the configured ContainerRequest to be sent to the RM
*/
private ContainerRequest setupContainerAskForRM() {
// setup requirements for hosts
// using * as any host will do for the distributed shell app
// set the priority for the request
// TODO - what is the range for priority? how to decide?
Priority pri = Priority.newInstance(requestPriority);
// Set up resource type requirements
// Memory, CPU and (in this fork) GPU cores are requested here
Resource capability = Resource.newInstance(containerMemory,
containerVirtualCores, containerGpuCores);
ContainerRequest request = new ContainerRequest(capability, null, null,
pri);
LOG.info("Requested container ask: " + request.toString());
return request;
}
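A note on the TODO above: within a single application, YARN considers requests with lower numeric priority values first (the MapReduce ApplicationMaster, for instance, asks for failed-map retries at a smaller number than ordinary maps). The sketch below only illustrates that convention; the capability variable and amRMClient are assumed to exist in the surrounding class and are not part of this example.

// Requests at priority 0 are offered containers before requests at priority 10
// when the scheduler allocates to this application.
Priority urgent = Priority.newInstance(0);
Priority routine = Priority.newInstance(10);
amRMClient.addContainerRequest(new ContainerRequest(capability, null, null, urgent));
amRMClient.addContainerRequest(new ContainerRequest(capability, null, null, routine));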
Example 4: testFillInRacks
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testFillInRacks() {
AMRMClientImpl<ContainerRequest> client =
new AMRMClientImpl<ContainerRequest>();
Configuration conf = new Configuration();
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
client.init(conf);
Resource capability = Resource.newInstance(1024, 1, 1);
ContainerRequest request =
new ContainerRequest(capability, new String[] {"host1", "host2"},
new String[] {"/rack2"}, Priority.newInstance(1));
client.addContainerRequest(request);
verifyResourceRequest(client, request, "host1", true);
verifyResourceRequest(client, request, "host2", true);
verifyResourceRequest(client, request, "/rack1", true);
verifyResourceRequest(client, request, "/rack2", true);
verifyResourceRequest(client, request, ResourceRequest.ANY, true);
}
Example 5: testDifferentLocalityRelaxationSamePriority
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
AMRMClientImpl<ContainerRequest> client =
new AMRMClientImpl<ContainerRequest>();
Configuration conf = new Configuration();
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
client.init(conf);
Resource capability = Resource.newInstance(1024, 1, 1);
ContainerRequest request1 =
new ContainerRequest(capability, new String[] {"host1", "host2"},
null, Priority.newInstance(1), false);
client.addContainerRequest(request1);
ContainerRequest request2 =
new ContainerRequest(capability, new String[] {"host3"},
null, Priority.newInstance(1), true);
client.addContainerRequest(request2);
}
Example 6: testLocalityRelaxationDifferentLevels
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test (expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
AMRMClientImpl<ContainerRequest> client =
new AMRMClientImpl<ContainerRequest>();
Configuration conf = new Configuration();
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
client.init(conf);
Resource capability = Resource.newInstance(1024, 1, 1);
ContainerRequest request1 =
new ContainerRequest(capability, new String[] {"host1", "host2"},
null, Priority.newInstance(1), false);
client.addContainerRequest(request1);
ContainerRequest request2 =
new ContainerRequest(capability, null,
new String[] {"rack1"}, Priority.newInstance(1), true);
client.addContainerRequest(request2);
}
Example 7: setupContainerAskForRM
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
/**
* Set up the request that will be sent to the RM for the container ask.
*
* @return the configured ContainerRequest to be sent to the RM
*/
private ContainerRequest setupContainerAskForRM() {
// Set up resource type requirements
// For now, memory and CPU are supported so we set memory and cpu requirements
Resource capability = Resource.newInstance(containerMemory, containerVCores);
Priority priority = Priority.newInstance(0);
return new ContainerRequest(capability, null, null, priority);
}
Example 8: newContainerToken
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public static Token newContainerToken(ContainerId cId, String host,
int port, String user, Resource r, long expiryTime, int masterKeyId,
byte[] password, long rmIdentifier) throws IOException {
ContainerTokenIdentifier identifier =
new ContainerTokenIdentifier(cId, host + ":" + port, user, r,
expiryTime, masterKeyId, rmIdentifier, Priority.newInstance(0), 0);
return newContainerToken(BuilderUtils.newNodeId(host, port), password,
identifier);
}
Example 9: createContainerTokenId
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private static ContainerTokenIdentifier createContainerTokenId(
ContainerId cid, NodeId nodeId, String user,
NMContainerTokenSecretManager secretMgr) throws IOException {
long rmid = cid.getApplicationAttemptId().getApplicationId()
.getClusterTimestamp();
ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid,
nodeId.toString(), user, BuilderUtils.newResource(1024, 1),
System.currentTimeMillis() + 100000L,
secretMgr.getCurrentKey().getKeyId(), rmid,
Priority.newInstance(0), 0);
Token token = BuilderUtils.newContainerToken(nodeId,
secretMgr.createPassword(ctid), ctid);
return BuilderUtils.newContainerTokenIdentifier(token);
}
Example 10: FSAppAttempt
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public FSAppAttempt(FairScheduler scheduler,
ApplicationAttemptId applicationAttemptId, String user, FSLeafQueue queue,
ActiveUsersManager activeUsersManager, RMContext rmContext) {
super(applicationAttemptId, user, queue, activeUsersManager, rmContext);
this.scheduler = scheduler;
this.startTime = scheduler.getClock().getTime();
this.priority = Priority.newInstance(1);
this.resourceWeights = new ResourceWeights();
}
Example 11: testInvalidValidWhenOldRemoved
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testInvalidValidWhenOldRemoved() {
AMRMClientImpl<ContainerRequest> client =
new AMRMClientImpl<ContainerRequest>();
Configuration conf = new Configuration();
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
client.init(conf);
Resource capability = Resource.newInstance(1024, 1, 1);
ContainerRequest request1 =
new ContainerRequest(capability, new String[] {"host1", "host2"},
null, Priority.newInstance(1), false);
client.addContainerRequest(request1);
client.removeContainerRequest(request1);
ContainerRequest request2 =
new ContainerRequest(capability, new String[] {"host3"},
null, Priority.newInstance(1), true);
client.addContainerRequest(request2);
client.removeContainerRequest(request2);
ContainerRequest request3 =
new ContainerRequest(capability, new String[] {"host1", "host2"},
null, Priority.newInstance(1), false);
client.addContainerRequest(request3);
client.removeContainerRequest(request3);
ContainerRequest request4 =
new ContainerRequest(capability, null,
new String[] {"rack1"}, Priority.newInstance(1), true);
client.addContainerRequest(request4);
}
Example 12: addContainerRequest
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void addContainerRequest(TaskStatusLocator locator, ContainerRequest request) {
nextContainerRequestPriority = Priority.newInstance(nextContainerRequestPriority.getPriority() + 1);
taskContainerRequests.put(locator, request);
priorityLocators.put(request.getPriority(), locator);
}
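This helper assigns every request its own priority value. A likely reason (an assumption, since the surrounding class is not shown) is twofold: AMRMClient rejects requests that share a priority but disagree on locality relaxation, as Examples 5 and 6 demonstrate, and a unique priority lets an allocated container be mapped back to the task that asked for it via priorityLocators. A minimal sketch of the same idea with hypothetical names:

// Give each outstanding ask a unique priority so the allocation that comes back
// can be matched to the task that requested it.
Priority p = Priority.newInstance(nextPriority++);
ContainerRequest ask = new ContainerRequest(capability, preferredHosts, null, p);
pendingTasks.put(p, task);
amRMClient.addContainerRequest(ask);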
Example 13: testContainerTokenIdentifier
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testContainerTokenIdentifier() throws IOException {
ContainerId containerID = ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(
1, 1), 1), 1);
String hostName = "host0";
String appSubmitter = "usr0";
Resource r = Resource.newInstance(1024, 1);
long expiryTimeStamp = 1000;
int masterKeyId = 1;
long rmIdentifier = 1;
Priority priority = Priority.newInstance(1);
long creationTime = 1000;
ContainerTokenIdentifier token = new ContainerTokenIdentifier(
containerID, hostName, appSubmitter, r, expiryTimeStamp,
masterKeyId, rmIdentifier, priority, creationTime);
ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
byte[] tokenContent = token.getBytes();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenContent, tokenContent.length);
anotherToken.readFields(dib);
// verify the whole record equals with original record
Assert.assertEquals("Token is not the same after serialization " +
"and deserialization.", token, anotherToken);
Assert.assertEquals(
"ContainerID from proto is not the same with original token",
anotherToken.getContainerID(), containerID);
Assert.assertEquals(
"Hostname from proto is not the same with original token",
anotherToken.getNmHostAddress(), hostName);
Assert.assertEquals(
"ApplicationSubmitter from proto is not the same with original token",
anotherToken.getApplicationSubmitter(), appSubmitter);
Assert.assertEquals(
"Resource from proto is not the same with original token",
anotherToken.getResource(), r);
Assert.assertEquals(
"expiryTimeStamp from proto is not the same with original token",
anotherToken.getExpiryTimeStamp(), expiryTimeStamp);
Assert.assertEquals(
"KeyId from proto is not the same with original token",
anotherToken.getMasterKeyId(), masterKeyId);
Assert.assertEquals(
"RMIdentifier from proto is not the same with original token",
anotherToken.getRMIdentifier(), rmIdentifier);
Assert.assertEquals(
"Priority from proto is not the same with original token",
anotherToken.getPriority(), priority);
Assert.assertEquals(
"CreationTime from proto is not the same with original token",
anotherToken.getCreationTime(), creationTime);
Assert.assertNull(anotherToken.getLogAggregationContext());
}
Example 14: test
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void test(String rpcClass) throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, ProtobufRpcEngine.class);
ContainerManagementProtocol proxy = (ContainerManagementProtocol)
rpc.getProxy(ContainerManagementProtocol.class,
NetUtils.getConnectAddress(server), conf);
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
proxy.startContainers(allRequests);
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(containerId);
GetContainerStatusesRequest gcsRequest =
GetContainerStatusesRequest.newInstance(containerIds);
GetContainerStatusesResponse response =
proxy.getContainerStatuses(gcsRequest);
List<ContainerStatus> statuses = response.getContainerStatuses();
//test remote exception
boolean exception = false;
try {
StopContainersRequest stopRequest =
recordFactory.newRecordInstance(StopContainersRequest.class);
stopRequest.setContainerIds(containerIds);
proxy.stopContainers(stopRequest);
} catch (YarnException e) {
exception = true;
Assert.assertTrue(e.getMessage().contains(EXCEPTION_MSG));
Assert.assertTrue(e.getMessage().contains(EXCEPTION_CAUSE));
System.out.println("Test Exception is " + e.getMessage());
} catch (Exception ex) {
ex.printStackTrace();
}
Assert.assertTrue(exception);
server.stop();
Assert.assertNotNull(statuses.get(0));
Assert.assertEquals(ContainerState.RUNNING, statuses.get(0).getState());
}
Example 15: testRPCTimeout
import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void testRPCTimeout(String rpcClass) throws Exception {
Configuration conf = new Configuration();
// set timeout low for the test
conf.setInt("yarn.rpc.nm-command-timeout", 3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
try {
ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(
ContainerManagementProtocol.class,
server.getListenerAddress(), conf);
ContainerLaunchContext containerLaunchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2, 3);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken =
TestRPC.newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
try {
proxy.startContainers(allRequests);
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertEquals("Error, exception is not: "
+ SocketTimeoutException.class.getName(),
SocketTimeoutException.class.getName(), e.getClass().getName());
return;
}
} finally {
server.stop();
}
Assert.fail("timeout exception should have occurred!");
}