This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.CounterGroup.findCounter. If you are wondering what CounterGroup.findCounter does, or how to call it, the curated examples below may help. You can also read further about its enclosing class, org.apache.hadoop.mapreduce.CounterGroup.
The following presents 4 code examples of CounterGroup.findCounter, listed roughly by popularity.
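Before the examples, here is a minimal sketch of the typical call pattern: after a job completes, its Counters can be asked for a named CounterGroup, and findCounter then looks an individual Counter up by name inside that group. The group and counter names used here ("my.example.Counters", "RECORDS_SKIPPED"), and the helper method itself, are placeholders for illustration and are not taken from any example below.

import java.io.IOException;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

/** Reads one counter value from a finished job; group/counter names are placeholders. */
static long readCounter(Job job, String groupName, String counterName) throws IOException {
    Counters counters = job.getCounters();
    CounterGroup group = counters.getGroup(groupName);   // returns an empty group if the name is unknown
    Counter counter = group.findCounter(counterName);    // look the counter up by name within the group
    return counter == null ? 0L : counter.getValue();
}

A typical call would be readCounter(job, "my.example.Counters", "RECORDS_SKIPPED") after job.waitForCompletion(true) has returned.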
Example 1: CounterGroupInfo
import org.apache.hadoop.mapreduce.CounterGroup; // import the package/class the method depends on
public CounterGroupInfo(String name, CounterGroup group, CounterGroup mg,
    CounterGroup rg) {
  this.counterGroupName = name;
  this.counter = new ArrayList<CounterInfo>();
  for (Counter c : group) {
    // Look up the counter of the same name in the map-side and reduce-side groups, if present.
    Counter mc = mg == null ? null : mg.findCounter(c.getName());
    Counter rc = rg == null ? null : rg.findCounter(c.getName());
    CounterInfo cinfo = new CounterInfo(c, mc, rc);
    this.counter.add(cinfo);
  }
}
Example 2: genItemMap
import org.apache.hadoop.mapreduce.CounterGroup; // import the package/class the method depends on
/**
 * Item counting and rebasing job; its output is the rebasing map.
 * KEY_REBASING_MAX_ID will be set in the current Configuration.
 *
 * @return true on success
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
private boolean genItemMap(String output) throws IOException, ClassNotFoundException, InterruptedException {
  Job job = Job.getInstance(this.getConf(), "Computing frequent items mapping to groups, from " + this.input);
  job.setJarByClass(TopPIoverHadoop.class);

  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);

  FileInputFormat.addInputPath(job, new Path(this.input));
  FileOutputFormat.setOutputPath(job, new Path(output));

  job.setMapperClass(ItemCountingMapper.class);
  job.setMapOutputKeyClass(NullWritable.class);
  job.setMapOutputValueClass(ItemAndSupportWritable.class);

  job.setReducerClass(ItemCountingReducer.class);
  job.setNumReduceTasks(1);

  boolean success = job.waitForCompletion(true);

  if (success) {
    // Read the maximum rebased item ID back from the reducer's counter group.
    CounterGroup counters = job.getCounters().getGroup(ItemCountingReducer.COUNTERS_GROUP);
    Counter rebasingMaxID = counters.findCounter(ItemCountingReducer.COUNTER_REBASING_MAX_ID);
    this.getConf().setInt(KEY_REBASING_MAX_ID, (int) rebasingMaxID.getValue());
  }

  return success;
}
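Example 2 is notable because the job's result is not only its HDFS output: the reducer publishes the maximum rebased item ID through a counter, and the driver copies it into the Configuration under KEY_REBASING_MAX_ID. Below is a hedged sketch of how a later stage of such a driver might consume that value; the method name runNextStage and the -1 sentinel are assumptions for illustration, only KEY_REBASING_MAX_ID comes from the example above.

// Hypothetical follow-up stage in the same driver (which, like the example, extends Configured).
private void runNextStage() {
  // -1 is an assumed sentinel meaning "genItemMap has not run yet (or found no items)".
  int maxItemId = this.getConf().getInt(KEY_REBASING_MAX_ID, -1);
  if (maxItemId < 0) {
    throw new IllegalStateException("genItemMap must complete before this stage");
  }
  // ... configure the next Job using maxItemId, e.g. to size per-item data structures ...
}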
Example 3: addReducePhaseDetails
import org.apache.hadoop.mapreduce.CounterGroup; // import the package/class the method depends on
/**
 * Populates the timing and counter details of a reduce task attempt.
 *
 * @param task the reduce task attempt being summarised
 * @param referencedZeroTime the reference start time used as the zero point for all offsets
 * @return TaskOutputDetails containing the details of the reduce phase
 */
private TaskOutputDetails addReducePhaseDetails(
    Entry<TaskAttemptID, TaskAttemptInfo> task, long referencedZeroTime) {
  TaskAttemptInfo taskAttemptInfo = task.getValue();
  TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
  taskOutputDetails.setTaskStatus(taskAttemptInfo.getTaskStatus());
  taskOutputDetails.setTaskType(taskAttemptInfo.getTaskType().toString());
  taskOutputDetails.setTaskID(taskAttemptInfo.getAttemptId().getTaskID().toString());
  taskOutputDetails.setLocation(taskAttemptInfo.getHostname());

  Counters counters = taskAttemptInfo.getCounters();
  CounterGroup mapReduceTaskCounters = counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter");
  Counter reduceOutputRecords = mapReduceTaskCounters.findCounter("REDUCE_OUTPUT_RECORDS");
  taskOutputDetails.setOutputRecords(reduceOutputRecords.getValue());
  // Note: the original code uses SPILLED_RECORDS as a proxy for the reduce output size.
  Counter reduceOutputBytes = mapReduceTaskCounters.findCounter("SPILLED_RECORDS");
  taskOutputDetails.setOutputBytes(reduceOutputBytes.getValue());

  long shuffleStartTime = (taskAttemptInfo.getStartTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setStartPoint(shuffleStartTime);
  taskOutputDetails.setShuffleStart(shuffleStartTime);
  long shuffleEnd = (taskAttemptInfo.getShuffleFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setShuffleEnd(shuffleEnd);
  taskOutputDetails.setSortStart(shuffleEnd);
  long sortEnd = (taskAttemptInfo.getSortFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setSortEnd(sortEnd);
  taskOutputDetails.setReduceStart(sortEnd);
  taskOutputDetails.setReduceEnd((taskAttemptInfo.getFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS);
  taskOutputDetails.setEndPoint(taskOutputDetails.getReduceEnd());

  long dataFlowRate = reduceOutputBytes.getValue() / (taskOutputDetails.getReduceEnd() - shuffleStartTime);
  taskOutputDetails.setDataFlowRate(dataFlowRate);

  Counter physicalMemoryBytes = mapReduceTaskCounters.findCounter("PHYSICAL_MEMORY_BYTES");
  ResourceUsageMetrics rum = new ResourceUsageMetrics();
  rum.setPhysicalMemoryUsage(physicalMemoryBytes.getValue());
  taskOutputDetails.setResourceUsageMetrics(rum);

  return taskOutputDetails;
}
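Examples 3 and 4 fetch well-known task counters by spelling out the group name "org.apache.hadoop.mapreduce.TaskCounter" and the counter names as strings. When the counter is one of Hadoop's built-ins, the same lookup can usually be done with the TaskCounter enum via Counters.findCounter(Enum), which avoids typos in the string names. The sketch below is an alternative, not part of the examples; it assumes the caller already holds the Counters object of the task attempt.

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

// Enum-based lookup of one of the built-in counters used in Example 3.
static long reduceOutputRecords(Counters counters) {
  Counter c = counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
  return c.getValue();
}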
Example 4: addMapPhaseDetails
import org.apache.hadoop.mapreduce.CounterGroup; // import the package/class the method depends on
/**
 * Adds detail for a map phase.
 *
 * @param task the map task attempt being summarised
 * @param referencedZeroTime the reference start time used as the zero point for all offsets
 * @return the phase details
 */
private TaskOutputDetails addMapPhaseDetails(Entry<TaskAttemptID, TaskAttemptInfo> task, long referencedZeroTime) {
  TaskAttemptInfo taskAttemptInfo = task.getValue();
  TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
  taskOutputDetails.setTaskStatus(taskAttemptInfo.getTaskStatus());
  taskOutputDetails.setTaskType(taskAttemptInfo.getTaskType().toString());
  taskOutputDetails.setTaskID(taskAttemptInfo.getAttemptId().getTaskID().toString());

  long startPoint = (taskAttemptInfo.getStartTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setStartPoint(startPoint);
  long endPoint = (taskAttemptInfo.getMapFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setEndPoint(endPoint);
  taskOutputDetails.setTimeTaken(endPoint - startPoint);
  taskOutputDetails.setLocation(taskAttemptInfo.getHostname());

  Counters counters = taskAttemptInfo.getCounters();
  // Bytes read from HDFS, taken from the file-system counter group.
  CounterGroup fileSystemCounters = counters.getGroup("org.apache.hadoop.mapreduce.FileSystemCounter");
  Counter inputBytes = fileSystemCounters.findCounter("HDFS_BYTES_READ");
  long dataFlowRate = inputBytes.getValue() / (endPoint - startPoint);
  taskOutputDetails.setDataFlowRate(dataFlowRate);

  // Map output and memory figures come from the built-in task counter group.
  CounterGroup mapReduceTaskCounters = counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter");
  Counter mapOutputRecords = mapReduceTaskCounters.findCounter("MAP_OUTPUT_RECORDS");
  Counter physicalMemoryBytes = mapReduceTaskCounters.findCounter("PHYSICAL_MEMORY_BYTES");
  ResourceUsageMetrics rum = new ResourceUsageMetrics();
  rum.setPhysicalMemoryUsage(physicalMemoryBytes.getValue());
  taskOutputDetails.setResourceUsageMetrics(rum);
  taskOutputDetails.setOutputRecords(mapOutputRecords.getValue());
  Counter mapOutputBytes = mapReduceTaskCounters.findCounter("MAP_OUTPUT_BYTES");
  taskOutputDetails.setOutputBytes(mapOutputBytes.getValue());

  return taskOutputDetails;
}
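All four examples assume the exact group and counter names are known in advance. When exploring an unfamiliar job, it can be simpler to iterate everything: Counters is iterable over its CounterGroups, and each CounterGroup is iterable over its Counters, exactly as Example 1 already relies on. Below is a small sketch of such a dump utility; the method name dumpCounters is an assumption for illustration.

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

// Prints every counter of every group, e.g. to discover the exact names to pass to findCounter.
static void dumpCounters(Counters counters) {
  for (CounterGroup group : counters) {
    System.out.println(group.getName());
    for (Counter counter : group) {
      System.out.println("  " + counter.getName() + " = " + counter.getValue());
    }
  }
}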