This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.Counters. If you are unsure what the Counters class does or how to use it, the curated examples below should help.
The Counters class belongs to the org.apache.hadoop.mapreduce package. Fourteen code examples are shown below, ordered roughly by popularity.
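Before the examples, a quick orientation: Counters is a snapshot of named counter groups attached to a job, and almost every example below follows the same fetch-then-read pattern. Here is a minimal sketch of that pattern, assuming a fully configured Job (the class and method names here are illustrative, not from any example below):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CountersQuickstart {
  // Run the job to completion, then read one well-known framework counter.
  static long mapInputRecords(Job job)
      throws IOException, InterruptedException, ClassNotFoundException {
    job.waitForCompletion(true);           // submit and block until done
    Counters counters = job.getCounters(); // may be null once the job retires
    return counters == null
        ? -1L
        : counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  }
}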
Example 1: JobMetrics
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
public JobMetrics(Job job, String bytesReplicatedKey) {
Builder<String, Long> builder = ImmutableMap.builder();
if (job != null) {
Counters counters;
try {
counters = job.getCounters();
} catch (IOException e) {
throw new CircusTrainException("Unable to get counters from job.", e);
}
if (counters != null) {
for (CounterGroup group : counters) {
for (Counter counter : group) {
builder.put(DotJoiner.join(group.getName(), counter.getName()), counter.getValue());
}
}
}
}
metrics = builder.build();
Long bytesReplicatedValue = metrics.get(bytesReplicatedKey);
if (bytesReplicatedValue != null) {
bytesReplicated = bytesReplicatedValue;
} else {
bytesReplicated = 0L;
}
}
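A note on the keys this produces: DotJoiner is not shown in the source, but by its name and usage it is presumably a dot-separated Guava Joiner, so the resulting map is keyed as "groupName.counterName". A hedged sketch of that presumed helper:

import com.google.common.base.Joiner;

// Presumed shape of DotJoiner (an assumption, not shown above). Keys come out
// as "group.counter", e.g. "org.apache.hadoop.mapreduce.TaskCounter.MAP_INPUT_RECORDS".
final class DotJoiner {
  private static final Joiner JOINER = Joiner.on('.');

  static String join(Object... parts) {
    return JOINER.join(parts);
  }
}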
Example 2: runJob
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
InterruptedException {
PerfCounters perfCounters = new PerfCounters();
perfCounters.startClock();
boolean success = doSubmitJob(job);
perfCounters.stopClock();
Counters jobCounters = job.getCounters();
// If the job has been retired, these may be unavailable.
if (null == jobCounters) {
displayRetiredJobNotice(LOG);
} else {
perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
.findCounter("HDFS_BYTES_READ").getValue());
LOG.info("Transferred " + perfCounters.toString());
long numRecords = ConfigurationHelper.getNumMapInputRecords(job);
LOG.info("Exported " + numRecords + " records.");
}
return success;
}
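"FileSystemCounters" is the legacy string name of the file-system counter group. On Hadoop 2.x and later the same value can also be read through the typed FileSystemCounter enum; a sketch under that assumption:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;

// Typed equivalent of the string-based lookup in the example above.
static long hdfsBytesRead(Counters jobCounters) {
  return jobCounters.findCounter("HDFS", FileSystemCounter.BYTES_READ).getValue();
}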
Example 3: getAllCounters
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
@Override
public Counters getAllCounters() {
readLock.lock();
try {
JobStateInternal state = getInternalState();
if (state == JobStateInternal.ERROR || state == JobStateInternal.FAILED
|| state == JobStateInternal.KILLED || state == JobStateInternal.SUCCEEDED) {
this.mayBeConstructFinalFullCounters();
return fullCounters;
}
Counters counters = new Counters();
counters.incrAllCounters(jobCounters);
return incrTaskCounters(counters, tasks.values());
} finally {
readLock.unlock();
}
}
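incrTaskCounters is a static helper on JobImpl that folds every live task's counters into the snapshot; reconstructed from the Hadoop sources, its shape is roughly:

// Aggregate each task's counters into the running total and return it.
public static Counters incrTaskCounters(Counters counters, Collection<Task> tasks) {
  for (Task task : tasks) {
    counters.incrAllCounters(task.getCounters());
  }
  return counters;
}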
Example 4: constructFinalFullcounters
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
@Private
public void constructFinalFullcounters() {
this.fullCounters = new Counters();
this.finalMapCounters = new Counters();
this.finalReduceCounters = new Counters();
this.fullCounters.incrAllCounters(jobCounters);
for (Task t : this.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP:
this.finalMapCounters.incrAllCounters(counters);
break;
case REDUCE:
this.finalReduceCounters.incrAllCounters(counters);
break;
default:
throw new IllegalStateException("Task type neither map nor reduce: " +
t.getType());
}
this.fullCounters.incrAllCounters(counters);
}
}
Example 5: getCounters
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
@Override
public Counters getCounters() {
Counters counters = null;
readLock.lock();
try {
TaskAttempt bestAttempt = selectBestAttempt();
if (bestAttempt != null) {
counters = bestAttempt.getCounters();
} else {
counters = TaskAttemptImpl.EMPTY_COUNTERS;
// counters.groups = new HashMap<CharSequence, CounterGroup>();
}
return counters;
} finally {
readLock.unlock();
}
}
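TaskAttemptImpl.EMPTY_COUNTERS is a shared sentinel that spares callers a null check; presumably it is nothing more than a pre-built instance that must never be mutated:

// Presumed definition on TaskAttemptImpl: returned in place of null, treated as read-only.
static final Counters EMPTY_COUNTERS = new Counters();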
Example 6: countersToJSON
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
@Private
public JsonNode countersToJSON(Counters counters) {
ObjectMapper mapper = new ObjectMapper();
ArrayNode nodes = mapper.createArrayNode();
if (counters != null) {
for (CounterGroup counterGroup : counters) {
ObjectNode groupNode = nodes.addObject();
groupNode.put("NAME", counterGroup.getName());
groupNode.put("DISPLAY_NAME", counterGroup.getDisplayName());
ArrayNode countersNode = groupNode.putArray("COUNTERS");
for (Counter counter : counterGroup) {
ObjectNode counterNode = countersNode.addObject();
counterNode.put("NAME", counter.getName());
counterNode.put("DISPLAY_NAME", counter.getDisplayName());
counterNode.put("VALUE", counter.getValue());
}
}
}
return nodes;
}
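A minimal driver for countersToJSON, assuming the Jackson 2 (com.fasterxml) types and a Job in scope; older Hadoop lines use the org.codehaus.jackson equivalents:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Pretty-print a finished job's counters as JSON.
JsonNode nodes = countersToJSON(job.getCounters());
String json = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(nodes);
System.out.println(json);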
Example 7: updateStatus
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
status.counters = new Counters();
status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
status.id = attempt.getID();
status.mapFinishTime = 0;
status.phase = phase;
status.progress = 0.5f;
status.shuffleFinishTime = 0;
status.sortFinishTime = 0;
status.stateString = "OK";
status.taskState = attempt.getState();
TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
status);
app.getContext().getEventHandler().handle(event);
}
Example 8: testTaskAttemptFinishedEvent
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
/**
* Tests the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
JobID jid = new JobID("001", 1);
TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
Counters counters = new Counters();
TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
counters);
assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
assertEquals(test.getCounters(), counters);
assertEquals(test.getFinishTime(), 123L);
assertEquals(test.getHostname(), "HOSTNAME");
assertEquals(test.getRackName(), "RAKNAME");
assertEquals(test.getState(), "STATUS");
assertEquals(test.getTaskId(), tid);
assertEquals(test.getTaskStatus(), "TEST");
assertEquals(test.getTaskType(), TaskType.REDUCE);
}
Example 9: verifySleepJobCounters
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
protected void verifySleepJobCounters(Job job) throws InterruptedException,
IOException {
Counters counters = job.getCounters();
Assert.assertEquals(3, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
.getValue());
Assert.assertEquals(3, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
.getValue());
Assert.assertEquals(numSleepReducers,
counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
Assert
.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
Assert
    .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES) != null
        && counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue() != 0);
}
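A detail worth knowing when reading these assertions: on stock Hadoop, findCounter(Enum) lazily creates a missing counter instead of returning null, so the null guards are largely defensive and the value check is what actually carries the test:

// findCounter(Enum) auto-creates absent counters, so the result is non-null
// by construction; the meaningful assertion is on the value.
Assert.assertTrue(
    counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue() != 0);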
Example 10: ReduceAttemptFinishedEvent
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
/**
* Create an event to record completion of a reduce attempt
* @param id Attempt Id
* @param taskType Type of task
* @param taskStatus Status of the task
* @param shuffleFinishTime Finish time of the shuffle phase
* @param sortFinishTime Finish time of the sort phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
* @param port RPC port for the tracker host.
* @param rackName Name of the rack where the attempt executed
* @param state State of the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
* Currently there are five: wallclock time, CPU time,
* GPU time, virtual memory and physical memory.
*/
public ReduceAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long shuffleFinishTime, long sortFinishTime, long finishTime,
String hostname, int port, String rackName, String state,
Counters counters, int[][] allSplits) {
this.attemptId = id;
this.taskType = taskType;
this.taskStatus = taskStatus;
this.shuffleFinishTime = shuffleFinishTime;
this.sortFinishTime = sortFinishTime;
this.finishTime = finishTime;
this.hostname = hostname;
this.rackName = rackName;
this.port = port;
this.state = state;
this.counters = counters;
this.allSplits = allSplits;
this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Example 11: toAvro
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
static JhCounters toAvro(Counters counters, String name) {
JhCounters result = new JhCounters();
result.name = new Utf8(name);
result.groups = new ArrayList<JhCounterGroup>(0);
if (counters == null) return result;
for (CounterGroup group : counters) {
JhCounterGroup g = new JhCounterGroup();
g.name = new Utf8(group.getName());
g.displayName = new Utf8(group.getDisplayName());
g.counts = new ArrayList<JhCounter>(group.size());
for (Counter counter : group) {
JhCounter c = new JhCounter();
c.name = new Utf8(counter.getName());
c.displayName = new Utf8(counter.getDisplayName());
c.value = counter.getValue();
g.counts.add(c);
}
result.groups.add(g);
}
return result;
}
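A hypothetical call site for toAvro, mirroring how the history writer snapshots a finished job's counters (the section name "COUNTERS" is an assumption):

// Convert and inspect; JhCounters exposes public fields, as the method above shows.
JhCounters total = toAvro(job.getCounters(), "COUNTERS");
System.out.println(total.name + ": " + total.groups.size() + " counter groups");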
Example 12: JobFinishedEvent
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
/**
* Create an event to record successful job completion
* @param id Job ID
* @param finishTime Finish time of the job
* @param finishedMaps The number of finished maps
* @param finishedReduces The number of finished reduces
* @param failedMaps The number of failed maps
* @param failedReduces The number of failed reduces
* @param mapCounters Map Counters for the job
* @param reduceCounters Reduce Counters for the job
* @param totalCounters Total Counters for the job
*/
public JobFinishedEvent(JobID id, long finishTime,
int finishedMaps, int finishedReduces,
int failedMaps, int failedReduces,
Counters mapCounters, Counters reduceCounters,
Counters totalCounters) {
this.jobId = id;
this.finishTime = finishTime;
this.finishedMaps = finishedMaps;
this.finishedReduces = finishedReduces;
this.failedMaps = failedMaps;
this.failedReduces = failedReduces;
this.mapCounters = mapCounters;
this.reduceCounters = reduceCounters;
this.totalCounters = totalCounters;
}
Example 13: MapAttemptFinishedEvent
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
/**
* Create an event for successful completion of map attempts
* @param id Task Attempt ID
* @param taskType Type of the task
* @param taskStatus Status of the task
* @param mapFinishTime Finish time of the map phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the map executed
* @param port RPC port for the tracker host.
* @param rackName Name of the rack where the map executed
* @param state State string for the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
* Currently there are five: wallclock time, CPU time,
* GPU time, virtual memory and physical memory.
*
* If you have no splits data, code {@code null} for this
* parameter.
*/
public MapAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long mapFinishTime, long finishTime, String hostname, int port,
String rackName, String state, Counters counters, int[][] allSplits) {
this.attemptId = id;
this.taskType = taskType;
this.taskStatus = taskStatus;
this.mapFinishTime = mapFinishTime;
this.finishTime = finishTime;
this.hostname = hostname;
this.rackName = rackName;
this.port = port;
this.state = state;
this.counters = counters;
this.allSplits = allSplits;
this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Example 14: constructTaskReport
import org.apache.hadoop.mapreduce.Counters; // import the required package/class
private void constructTaskReport() {
loadAllTaskAttempts();
this.report = Records.newRecord(TaskReport.class);
report.setTaskId(taskId);
long minLaunchTime = Long.MAX_VALUE;
for(TaskAttempt attempt: attempts.values()) {
minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
}
minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
report.setStartTime(minLaunchTime);
report.setFinishTime(taskInfo.getFinishTime());
report.setTaskState(getState());
report.setProgress(getProgress());
Counters counters = getCounters();
if (counters == null) {
counters = EMPTY_COUNTERS;
}
report.setCounters(TypeConverter.toYarn(counters));
if (successfulAttempt != null) {
report.setSuccessfulAttempt(successfulAttempt);
}
report.addAllDiagnostics(reportDiagnostics);
report
.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
}