本文整理汇总了Java中org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse类的典型用法代码示例。如果您正苦于以下问题:Java GetNewApplicationResponse类的具体用法?Java GetNewApplicationResponse怎么用?Java GetNewApplicationResponse使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
GetNewApplicationResponse类属于org.apache.hadoop.yarn.api.protocolrecords包,在下文中一共展示了GetNewApplicationResponse类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: mockYarnClient
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Builds a Mockito-backed {@link YarnClient} whose collaborators return
 * canned values, so tests can exercise client logic without a live cluster.
 * Populates the static {@code yarnClient}, {@code appId} and {@code report}
 * fixtures used by the tests.
 */
private static void mockYarnClient() throws YarnException, IOException {
    yarnClient = mock(YarnClient.class);
    doNothing().when(yarnClient).start();

    // Application-creation chain: client -> application -> submission context.
    YarnClientApplication application = mock(YarnClientApplication.class);
    GetNewApplicationResponse newAppResponse = mock(GetNewApplicationResponse.class);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(yarnClient.createApplication()).thenReturn(application);
    when(application.getNewApplicationResponse()).thenReturn(newAppResponse);
    when(application.getApplicationSubmissionContext()).thenReturn(submissionContext);
    doNothing().when(submissionContext).setApplicationName(Matchers.anyString());

    // Canned application id exposed through the submission context.
    appId = mock(ApplicationId.class);
    when(submissionContext.getApplicationId()).thenReturn(appId);
    when(appId.getId()).thenReturn(1);
    when(appId.toString()).thenReturn("application_1465186316357_0001");

    // Canned application report returned for that id.
    report = mock(ApplicationReport.class);
    when(yarnClient.getApplicationReport(appId)).thenReturn(report);
    when(report.getDiagnostics()).thenReturn("fake diagnostics");
    when(report.getQueue()).thenReturn("fake queue");
    when(report.getProgress()).thenReturn(0.5f);
}
示例2: createNewApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Function that actually creates the ApplicationId by calling the
 * ClientRMService.
 *
 * @return structure containing the app-id and maximum resource
 *         capabilities of the cluster
 */
private NewApplication createNewApplication() {
    final GetNewApplicationRequest request =
        recordFactory.newRecordInstance(GetNewApplicationRequest.class);
    final GetNewApplicationResponse response;
    try {
        response = rm.getClientRMService().getNewApplication(request);
    } catch (YarnException e) {
        String msg = "Unable to create new app from RM web service";
        LOG.error(msg, e);
        // Surface as unchecked so the web layer can translate it into an error response.
        throw new YarnRuntimeException(msg, e);
    }
    return new NewApplication(response.getApplicationId().toString(),
        new ResourceInfo(response.getMaximumResourceCapability()));
}
示例3: Application
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Registers a new application with the ResourceManager for the given
 * user and queue, recording the resulting application id and a fresh
 * attempt id.
 */
public Application(String user, String queue, ResourceManager resourceManager)
    throws YarnException {
    this.user = user;
    this.queue = queue;
    this.resourceManager = resourceManager;

    // Register an application with the RM to obtain a fresh application id.
    GetNewApplicationRequest newAppRequest =
        Records.newRecord(GetNewApplicationRequest.class);
    GetNewApplicationResponse newAppResponse =
        this.resourceManager.getClientRMService().getNewApplication(newAppRequest);
    this.applicationId = newAppResponse.getApplicationId();
    this.applicationAttemptId = ApplicationAttemptId.newInstance(
        this.applicationId, this.numAttempts.getAndIncrement());
}
示例4: prepareContainerResource
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Builds the {@link Resource} ask for the ApplicationMaster container,
 * capping the configured memory (MB) and vcores at the cluster maxima
 * reported in {@code newApplicationResponse}.
 */
private Resource prepareContainerResource(GetNewApplicationResponse newApplicationResponse) {
    int memoryMbs = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_MEMORY_MBS_KEY);
    int maximumMemoryCapacity = newApplicationResponse.getMaximumResourceCapability().getMemory();
    if (memoryMbs > maximumMemoryCapacity) {
        LOGGER.info(String.format("Specified AM memory [%d] is above the maximum memory capacity [%d] of the "
            + "cluster, using the maximum memory capacity instead.", memoryMbs, maximumMemoryCapacity));
        memoryMbs = maximumMemoryCapacity;
    }

    int vCores = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_CORES_KEY);
    int maximumVirtualCoreCapacity = newApplicationResponse.getMaximumResourceCapability().getVirtualCores();
    if (vCores > maximumVirtualCoreCapacity) {
        // Bug fix: this message previously logged the memory values
        // (memoryMbs / maximumMemoryCapacity) instead of the vcore values.
        LOGGER.info(String.format("Specified AM vcores [%d] is above the maximum vcore capacity [%d] of the "
            + "cluster, using the maximum vcore capacity instead.", vCores, maximumVirtualCoreCapacity));
        vCores = maximumVirtualCoreCapacity;
    }

    // Set up resource type requirements for ApplicationMaster
    return Resource.newInstance(memoryMbs, vCores);
}
示例5: checkAmResourceRequest
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Clamps the requested ApplicationMaster memory ({@code amMemory}) and
 * vcores ({@code amVCores}) to the cluster maxima advertised by the RM,
 * logging a warning whenever a value is reduced.
 */
private void checkAmResourceRequest(GetNewApplicationResponse appResponse) {
    final int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    final int maxVcores = appResponse.getMaximumResourceCapability().getVirtualCores();

    if (amMemory > maxMem) {
        logger.log(Level.WARNING,
            "AM memory specified above max threshold of cluster. Using max value. Specified: {0}, max: {1}",
            new Object[]{amMemory, maxMem});
        amMemory = maxMem;
    }
    if (amVCores > maxVcores) {
        logger.log(Level.WARNING,
            "AM vcores specified above max threshold of cluster. Using max value. Specified: {0}, max: {1}",
            new Object[]{amVCores, maxVcores});
        amVCores = maxVcores;
    }
}
示例6: testRpcCallWithNonValidCert
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Verifies that an RPC made with an invalid client certificate fails with
 * the exception type appropriate to the configured error mode.
 */
@Test
public void testRpcCallWithNonValidCert() throws Exception {
    // Point the client at the deliberately broken key/trust stores.
    conf.set(HopsSSLSocketFactory.CryptoKeys.KEY_STORE_FILEPATH_KEY.getValue(), err_clientKeyStore.toString());
    conf.set(HopsSSLSocketFactory.CryptoKeys.KEY_STORE_PASSWORD_KEY.getValue(), passwd);
    conf.set(HopsSSLSocketFactory.CryptoKeys.KEY_PASSWORD_KEY.getValue(), passwd);
    conf.set(HopsSSLSocketFactory.CryptoKeys.TRUST_STORE_FILEPATH_KEY.getValue(), err_clientTrustStore.toString());
    conf.set(HopsSSLSocketFactory.CryptoKeys.TRUST_STORE_PASSWORD_KEY.getValue(), passwd);

    // Exception will be thrown later. JUnit does not execute the code
    // after the exception, so make the call in a separate thread.
    invoker = new Thread(new Invoker(acClient));
    invoker.start();

    LOG.debug("Creating the second client");
    acClient1 = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class,
        true);

    GetNewApplicationRequest newAppRequest = GetNewApplicationRequest.newInstance();
    // Arm the expected-exception rule according to the certificate error mode.
    if (error_mode.equals(CERT_ERR.NO_CA)) {
        rule.expect(SSLException.class);
    } else if (error_mode.equals(CERT_ERR.ERR_CN)) {
        rule.expect(RpcServerException.class);
    }
    // The call itself is expected to throw; the response is never used.
    GetNewApplicationResponse unusedResponse = acClient1.getNewApplication(newAppRequest);
}
示例7: run
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Starts the YARN client, creates a new application, and logs cluster
 * information: maximum resource capabilities, cluster metrics, running
 * node reports and the available queues.
 *
 * @return always {@code true} once the cluster information has been logged
 */
public boolean run() throws YarnException, IOException {
    yarnClient.start();

    YarnClientApplication client = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = client.getNewApplicationResponse();
    appId = appResponse.getApplicationId();
    // Typo fix: log message previously read "Applicatoin ID".
    LOG.info("Application ID = {}", appId);

    int maxMemory = appResponse.getMaximumResourceCapability().getMemory();
    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max memory = {} and max vcores = {}", maxMemory, maxVCores);

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Number of NodeManagers = {}", clusterMetrics.getNumNodeManagers());

    List<NodeReport> nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    for (NodeReport node : nodeReports) {
        LOG.info("Node ID = {}, address = {}, containers = {}", node.getNodeId(), node.getHttpAddress(),
            node.getNumContainers());
    }

    List<QueueInfo> queueList = yarnClient.getAllQueues();
    for (QueueInfo queue : queueList) {
        LOG.info("Available queue: {} with capacity {} to {}", queue.getQueueName(), queue.getCapacity(),
            queue.getMaximumCapacity());
    }
    return true;
}
示例8: validateResourceForAM
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * If we do not have min/max, we may not be able to correctly request
 * the required resources from the RM for the app master.
 * Memory ask has to be a multiple of min and less than max.
 * Dumps out information about cluster capability as seen by the resource
 * manager, and caps {@code amMemory}/{@code amVCores} at the cluster maxima.
 *
 * @param app the application handle whose new-application response carries
 *            the cluster's maximum resource capability
 */
private void validateResourceForAM(YarnClientApplication app) {
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    // Typo fix: log messages previously read "capabililty".
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value."
            + ", specified=" + amMemory
            + ", max=" + maxMem);
        amMemory = maxMem;
    }
    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);
    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. "
            + "Using max value." + ", specified=" + amVCores
            + ", max=" + maxVCores);
        amVCores = maxVCores;
    }
}
示例9: checkAndAdjustPerTaskHeapSize
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Adjust the user-supplied <code>-yh</code> and <code>-w</code>
 * settings if they are too small or large for the current cluster,
 * and re-record the new settings in the GiraphConfiguration for export.
 * @param gnar the GetNewAppResponse from the YARN ResourceManager.
 */
private void checkAndAdjustPerTaskHeapSize(GetNewApplicationResponse gnar) {
    // do we have the right heap size on these cluster nodes to run our job?
    final int minCapacity = gnar.getMinimumResourceCapability().getMemory();
    final int maxCapacity = gnar.getMaximumResourceCapability().getMemory();
    // make sure heap size is OK for this cluster's available containers
    int giraphMem = giraphConf.getYarnTaskHeapMb();
    if (giraphMem == GiraphConstants.GIRAPH_YARN_TASK_HEAP_MB_DEFAULT) {
        LOG.info("Defaulting per-task heap size to " + giraphMem + "MB.");
    }
    if (giraphMem > maxCapacity) {
        // Message fix: this branch compares against the MAXIMUM capacity (the
        // original text said "minimum") and was missing a space before the value.
        LOG.info("Giraph's request of heap MB per-task is more than the " +
            "maximum; downgrading Giraph to " + maxCapacity + "MB.");
        giraphMem = maxCapacity;
    }
    if (giraphMem < minCapacity) {
        LOG.info("Giraph's request of heap MB per-task is less than the " +
            "minimum; upgrading Giraph to " + minCapacity + "MB.");
        giraphMem = minCapacity;
    }
    giraphConf.setYarnTaskHeapMb(giraphMem); // record any changes made
}
示例10: getNewApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Protocol-buffer client stub: converts the request record to its proto
 * form, invokes the remote {@code getNewApplication}, and wraps the proto
 * response back into a record. Service-level failures are unwrapped and
 * rethrown as YARN/IO exceptions.
 */
@Override
public GetNewApplicationResponse getNewApplication(
    GetNewApplicationRequest request) throws YarnException,
    IOException {
    GetNewApplicationRequestProto requestProto =
        ((GetNewApplicationRequestPBImpl) request).getProto();
    try {
        GetNewApplicationResponseProto responseProto =
            proxy.getNewApplication(null, requestProto);
        return new GetNewApplicationResponsePBImpl(responseProto);
    } catch (ServiceException e) {
        // Rethrows the underlying YarnException/IOException; the return
        // below is never reached but keeps the compiler satisfied.
        RPCUtil.unwrapAndThrowException(e);
        return null;
    }
}
示例11: getNewApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Builds a new-application response carrying a freshly generated
 * application id and the scheduler's maximum resource capability.
 */
@Override
public GetNewApplicationResponse getNewApplication(
    GetNewApplicationRequest request) throws YarnException {
    GetNewApplicationResponse newAppResponse =
        recordFactory.newRecordInstance(GetNewApplicationResponse.class);
    newAppResponse.setApplicationId(getNewApplicationId());
    // Pick up min/max resource from scheduler...
    newAppResponse.setMaximumResourceCapability(
        scheduler.getMaximumResourceCapability());
    return newAppResponse;
}
示例12: testGetNewAppId
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Verifies that a freshly started MockRM hands out an application id with
 * a non-zero id and a positive maximum memory capability.
 */
@Test
public void testGetNewAppId() throws Exception {
// Raise root logging to DEBUG so RM internals are visible on failure.
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MockRM rm = new MockRM(conf);
rm.start();
GetNewApplicationResponse resp = rm.getNewAppId();
// NOTE(review): these use the Java `assert` keyword, which is a no-op unless
// the JVM runs with -ea; JUnit assertions would always execute.
assert (resp.getApplicationId().getId() != 0);
assert (resp.getMaximumResourceCapability().getMemory() > 0);
rm.stop();
}
示例13: createApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Creates a new YARN client application: requests a fresh application id
 * from the RM and pairs the response with an empty submission context
 * carrying that id.
 */
@Override
public YarnClientApplication createApplication()
    throws YarnException, IOException {
    ApplicationSubmissionContext submissionContext =
        Records.newRecord(ApplicationSubmissionContext.class);
    GetNewApplicationResponse newAppResponse = getNewApplication();
    submissionContext.setApplicationId(newAppResponse.getApplicationId());
    return new YarnClientApplication(newAppResponse, submissionContext);
}
示例14: getNewApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Failover-test stub: asserts that failover has been triggered before
 * answering, then returns a response built around a fake application id
 * (no min/max capabilities).
 */
@Override
public GetNewApplicationResponse getNewApplication(
    GetNewApplicationRequest request) throws YarnException {
    resetStartFailoverFlag(true);
    // make sure failover has been triggered
    Assert.assertTrue(waittingForFailOver());
    // create the GetNewApplicationResponse with fake applicationId
    return GetNewApplicationResponse.newInstance(createFakeAppId(), null, null);
}
示例15: getNewApplication
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; //导入依赖的package包/类
/**
 * Adapter: delegates to the HPC scheduling client and translates its
 * response (application id plus min/max capabilities) into the YARN
 * {@link GetNewApplicationResponse} record.
 */
@Override
public GetNewApplicationResponse getNewApplication(
    GetNewApplicationRequest request) throws YarnException, IOException {
    NewApplicationResponse hpcResponse = appClient.getNewApplication();
    return GetNewApplicationResponse.newInstance(
        hpcResponse.getApplicationId(),
        hpcResponse.getMinCapability(),
        hpcResponse.getMaxCapability());
}
开发者ID:intel-hpdd,项目名称:scheduling-connector-for-hadoop,代码行数:9,代码来源:HPCApplicationClientProtocolImpl.java