本文整理汇总了Java中org.apache.hadoop.mapred.YARNRunner类的典型用法代码示例。如果您正苦于以下问题:Java YARNRunner类的具体用法?Java YARNRunner怎么用?Java YARNRunner使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
YARNRunner类属于org.apache.hadoop.mapred包,在下文中一共展示了YARNRunner类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: printJobStatus
import org.apache.hadoop.mapred.YARNRunner; //导入依赖的package包/类
/**
 * Logs the overall status and the per-task progress of the given job, then
 * returns its current status so callers can poll for completion.
 *
 * @param yarnRunner client used to query the ResourceManager for job/task info
 * @param jobID      identifier of the job to report on
 * @return the job's current {@link JobStatus}
 * @throws IOException          if the status/report RPC fails
 * @throws InterruptedException if the query is interrupted
 */
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
    JobStatus jobStatus = yarnRunner.getJobStatus(jobID);
    // Overall job map/reduce progress.
    // FIX: added the missing space before "in queue" — the original concatenation
    // rendered as "Job <name>in queue (...)".
    LOGGER.info("\nJob " + jobStatus.getJobName() + " in queue (" + jobStatus.getQueue() + ")"
        + " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
    LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
    LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : "
        + jobStatus.getUsedMem() + " and used slots : " + jobStatus.getNumUsedSlots());
    // Per-task status and progress: map tasks first, then reduce tasks.
    for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.MAP)) {
        LOGGER.info("MAP: Status " + report.getCurrentStatus() + " with task ID "
            + report.getTaskID() + ", and progress " + report.getProgress());
    }
    for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.REDUCE)) {
        LOGGER.info("REDUCE: " + report.getCurrentStatus() + " with task ID "
            + report.getTaskID() + ", and progress " + report.getProgress());
    }
    return jobStatus;
}
示例2: testClusterWithYarnClientProvider
import org.apache.hadoop.mapred.YARNRunner; //导入依赖的package包/类
/**
 * Verifies that setting {@code mapreduce.framework.name} to "yarn" makes
 * {@link Cluster} select a {@link YARNRunner} as its client implementation.
 */
@Test
public void testClusterWithYarnClientProvider() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
    Cluster cluster = new Cluster(conf);
    try {
        assertTrue(cluster.getClient() instanceof YARNRunner);
    } finally {
        // FIX: close in finally so the client is released even if the
        // assertion fails; the original leaked the cluster on failure.
        cluster.close();
    }
}
示例3: testClusterGetDelegationToken
import org.apache.hadoop.mapred.YARNRunner; //导入依赖的package包/类
/**
 * Verifies that {@link Cluster#getDelegationToken} round-trips through the RM
 * client: a mocked {@link ApplicationClientProtocol} answers every
 * getDelegationToken request with a canned token, and the test asserts the
 * token kind ("Testclusterkind") survives the conversion back to a
 * {@code Token}.
 */
@Test
public void testClusterGetDelegationToken() throws Exception {
    // FIX: the original created `new Configuration(false)` and then
    // unconditionally replaced it with `new Configuration()` inside the try —
    // dead code; build the effective configuration once.
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    Cluster cluster = null;
    try {
        cluster = new Cluster(conf);
        YARNRunner yrunner = (YARNRunner) cluster.getClient();
        // Canned RM response: a 2-byte identifier, fixed kind/password/service.
        GetDelegationTokenResponse getDTResponse =
            recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
        org.apache.hadoop.yarn.api.records.Token rmDTToken = recordFactory.newRecordInstance(
            org.apache.hadoop.yarn.api.records.Token.class);
        rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
        rmDTToken.setKind("Testclusterkind");
        rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
        rmDTToken.setService("0.0.0.0:8032");
        getDTResponse.setRMDelegationToken(rmDTToken);
        // Mock RM protocol that always returns the canned response.
        final ApplicationClientProtocol cRMProtocol = mock(ApplicationClientProtocol.class);
        when(cRMProtocol.getDelegationToken(any(
            GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
        // Inject the mock into the delegate at service start, before any RPC.
        ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
            new YarnConfiguration(conf)) {
            @Override
            protected void serviceStart() throws Exception {
                assertTrue(this.client instanceof YarnClientImpl);
                ((YarnClientImpl) this.client).setRMClient(cRMProtocol);
            }
        };
        yrunner.setResourceMgrDelegate(rmgrDelegate);
        Token t = cluster.getDelegationToken(new Text(" "));
        assertTrue("Token kind is instead " + t.getKind().toString(),
            "Testclusterkind".equals(t.getKind().toString()));
    } finally {
        if (cluster != null) {
            cluster.close();
        }
    }
}
示例4: testClusterGetDelegationToken
import org.apache.hadoop.mapred.YARNRunner; //导入依赖的package包/类
// Variant of the delegation-token test above: same canned-token round trip, but
// the delegate's YarnClient is additionally wrapped in a Mockito spy with
// close() stubbed to a no-op, so cluster.close() in the finally block cannot
// tear down the mocked RM client. NOTE(review): the spy must be installed
// before setRMClient is called — keep the statement order in serviceStart().
@Test
public void testClusterGetDelegationToken() throws Exception {
// NOTE(review): this Configuration(false) is immediately overwritten inside the
// try block below; it only serves as a non-null initial value.
Configuration conf = new Configuration(false);
Cluster cluster = null;
try {
conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
cluster = new Cluster(conf);
YARNRunner yrunner = (YARNRunner) cluster.getClient();
// Canned RM response: 2-byte identifier, fixed kind/password/service.
GetDelegationTokenResponse getDTResponse =
recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
org.apache.hadoop.yarn.api.records.Token rmDTToken = recordFactory.newRecordInstance(
org.apache.hadoop.yarn.api.records.Token.class);
rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
rmDTToken.setKind("Testclusterkind");
rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
rmDTToken.setService("0.0.0.0:8032");
getDTResponse.setRMDelegationToken(rmDTToken);
// Mock RM protocol that always returns the canned response.
final ApplicationClientProtocol cRMProtocol = mock(ApplicationClientProtocol.class);
when(cRMProtocol.getDelegationToken(any(
GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
// Inject the mock at service start; spy the client so close() is a no-op.
ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
new YarnConfiguration(conf)) {
@Override
protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
this.client = spy(this.client);
doNothing().when(this.client).close();
((YarnClientImpl) this.client).setRMClient(cRMProtocol);
}
};
yrunner.setResourceMgrDelegate(rmgrDelegate);
Token t = cluster.getDelegationToken(new Text(" "));
// The returned token must carry the kind set on the canned RM token.
assertTrue("Token kind is instead " + t.getKind().toString(),
"Testclusterkind".equals(t.getKind().toString()));
} finally {
if (cluster != null) {
cluster.close();
}
}
}
示例5: main
import org.apache.hadoop.mapred.YARNRunner; //导入依赖的package包/类
/**
 * Driver for the YARN capacity-queue demo: submits one job to the
 * low-priority queue and one to the high-priority queue, then polls job and
 * queue status once per second until the low-priority job completes.
 *
 * @param args unused
 */
public static void main(String[] args) {
    try {
        JobClient jobClient = new JobClient();
        QueueOrchestrator qo = new QueueOrchestrator();
        HttpClient client = new HttpClient();
        ObjectMapper mapper = new ObjectMapper();
        String schedulerURL = "http://sandbox.hortonworks.com:8088/ws/v1/cluster/scheduler";
        LOGGER.info("Starting YARN Capacity Queue Test");
        LOGGER.info("yarn.scheduler.capacity.root.queues = default,highPriority,lowPriority");
        LOGGER.info("yarn.scheduler.capacity.root.highPriority.capacity = 70");
        LOGGER.info("yarn.scheduler.capacity.root.lowPriority.capacity = 20");
        LOGGER.info("yarn.scheduler.capacity.root.highPriority.default = 10");
        // FIX: the original had no {} placeholder, so the URL was silently dropped
        // from the log output.
        LOGGER.info("Scheduler URL: {}", schedulerURL);
        MRJobStatus mrJobStatus = new MRJobStatus();
        QueueInformation queueInformation = new QueueInformation();
        // One staging dir per queue (queues are defined in capacity-scheduler.xml).
        Path tempDirLow = jobClient.createTempDir("lowPriority");
        Path tempDirHigh = jobClient.createTempDir("highPriority");
        String lowPriorityQueue = "lowPriority";   // plain literals; new String(...) was redundant
        String highPriorityQueue = "highPriority";
        // YARNRunner is only used to read job status, so either queue's conf works.
        Configuration lowPriorityConf = qo.getConfiguration(lowPriorityQueue);
        YARNRunner yarnRunner = new YARNRunner(lowPriorityConf);
        // FIX: was built from lowPriorityQueue by copy-paste mistake.
        Configuration highPriorityConf = qo.getConfiguration(highPriorityQueue);
        JobID lowPriorityJobID = qo.submitJobsIntoQueues(lowPriorityQueue, tempDirLow);
        JobID highPriorityJobID = qo.submitJobsIntoQueues(highPriorityQueue, tempDirHigh);
        JobStatus lowPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, lowPriorityJobID);
        JobStatus highPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, highPriorityJobID);
        // Poll both jobs and the scheduler queues until the low-priority job
        // finishes. NOTE(review): high-priority completion is not awaited — this
        // matches the original behavior.
        while (!lowPriorityJobStatus.isJobComplete()) {
            highPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, highPriorityJobID);
            lowPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, lowPriorityJobID);
            queueInformation.printQueueInfo(client, mapper, schedulerURL);
            Thread.sleep(1000);
        }
    } catch (Exception e) {
        // FIX: corrected the "occured" typo in the log message.
        LOGGER.error("Exception occurred", e);
    }
}