This page collects typical usage examples of the Java method org.apache.hadoop.mapred.Counters.Group.iterator. If you are wondering what Group.iterator does, how to call it, or want to see it in real code, the curated samples below may help. You can also explore the enclosing class org.apache.hadoop.mapred.Counters.Group for more context.
The following presents 7 code examples of Group.iterator, sorted by popularity by default.
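Before the extracted examples, here is a minimal, self-contained sketch of the basic Group.iterator pattern. The group name "my-group" and counter name "records" are invented for this demo; only Counters/Group calls that also appear in the examples below are used:

import java.util.Iterator;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class GroupIteratorDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    counters.incrCounter("my-group", "records", 42); // hypothetical group/counter names
    Group group = counters.getGroup("my-group");
    Iterator<Counter> it = group.iterator(); // the method this page documents
    while (it.hasNext()) {
      Counter c = it.next();
      System.out.println(c.getDisplayName() + " = " + c.getValue());
    }
  }
}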
Example 1: testFileSystemGroupIteratorConcurrency

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);

  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Example 2: countersToJson

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

public static Object countersToJson(Counters counters) {
  Map<String, Object> jsonObj = new HashMap<String, Object>();
  if (counters == null) {
    return jsonObj;
  }
  Collection<String> counterGroups = counters.getGroupNames();
  for (String groupName : counterGroups) {
    Map<String, String> counterStats = new HashMap<String, String>();
    Group group = counters.getGroup(groupName);
    Iterator<Counters.Counter> it = group.iterator();
    while (it.hasNext()) {
      Counter counter = it.next();
      counterStats.put(counter.getDisplayName(),
          String.valueOf(counter.getCounter()));
    }
    jsonObj.put(groupName, counterStats);
  }
  return jsonObj;
}
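A quick, hypothetical way to exercise countersToJson; the group and counter names are invented, and the cast reflects the map the method actually builds:

Counters counters = new Counters();
counters.incrCounter("demo-group", "rows-read", 123); // illustrative names only
@SuppressWarnings("unchecked")
Map<String, Object> json = (Map<String, Object>) countersToJson(counters);
for (Map.Entry<String, Object> e : json.entrySet()) {
  System.out.println(e.getKey() + " -> " + e.getValue()); // demo-group -> {rows-read=123}
}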
Example 3: testGroupIteratorConcurrency

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  // updating the group after obtaining the iterator must not break it
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}
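Example 3 relies on the mapred Counters iterator tolerating a concurrent update. If you would rather not depend on that behavior, one defensive sketch (not part of the original example) is to snapshot the group before mutating it:

// copy the group's counters into a list, then mutate freely
Iterator<Counter> it = counters.getGroup("group1").iterator();
List<Counter> snapshot = new ArrayList<Counter>();
while (it.hasNext()) {
  snapshot.add(it.next());
}
counters.incrCounter("group1", "counter2", 1); // safe: we iterate the copy
for (Counter c : snapshot) {
  System.out.println(c.getDisplayName() + " = " + c.getValue());
}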
Example 4: saveCounters

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

/**
 * Reads the global counters produced by a job on the group labeled with PIG_MAP_RANK_NAME.
 * It then computes the cumulative sum: each value is the previous cumulative sum
 * plus the previous global counter value.
 * @param job the job whose global counters were collected.
 * @param operationID after being collected in global counters (POCounter),
 * these values are passed via the configuration file to PORank, using the unique
 * operation identifier
 */
private void saveCounters(Job job, String operationID) {
  Counters counters;
  Group groupCounters;
  Long previousValue = 0L;
  Long previousSum = 0L;
  ArrayList<Pair<String, Long>> counterPairs;
  try {
    counters = HadoopShims.getCounters(job);
    groupCounters = counters.getGroup(getGroupName(counters.getGroupNames()));
    // collect the counters keyed by task index (the display name is the index)
    Iterator<Counter> it = groupCounters.iterator();
    HashMap<Integer, Long> counterList = new HashMap<Integer, Long>();
    while (it.hasNext()) {
      try {
        Counter c = it.next();
        counterList.put(Integer.valueOf(c.getDisplayName()), c.getValue());
      } catch (Exception ex) {
        ex.printStackTrace();
      }
    }
    counterSize = counterList.size();
    counterPairs = new ArrayList<Pair<String, Long>>();
    for (int i = 0; i < counterSize; i++) {
      previousSum += previousValue;
      previousValue = counterList.get(Integer.valueOf(i));
      counterPairs.add(new Pair<String, Long>(
          JobControlCompiler.PIG_MAP_COUNTER + operationID
              + JobControlCompiler.PIG_MAP_SEPARATOR + i, previousSum));
    }
    globalCounters.put(operationID, counterPairs);
  } catch (Exception e) {
    String msg = "Error reading counters for Rank operation, counterSize " + counterSize;
    throw new RuntimeException(msg, e);
  }
}
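To make the cumulative-sum loop concrete, here is a tiny Hadoop-free sketch with made-up per-task counts; it mirrors the previousSum/previousValue bookkeeping above:

// made-up counter values for tasks 0..3
long[] taskCounts = {5, 3, 7, 2};
long previousValue = 0L;
long previousSum = 0L;
for (int i = 0; i < taskCounts.length; i++) {
  previousSum += previousValue; // offset for task i = sum of tasks 0..i-1
  previousValue = taskCounts[i];
  System.out.println("task " + i + " rank offset = " + previousSum);
}
// prints 0, 5, 8, 15: each task's starting rank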
Example 5: printCounters

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

private void printCounters(StringBuffer buff, JobHistory.JobInfo job)
    throws ParseException {
  Counters mapCounters =
      Counters.fromEscapedCompactString(job.get(Keys.MAP_COUNTERS));
  Counters reduceCounters =
      Counters.fromEscapedCompactString(job.get(Keys.REDUCE_COUNTERS));
  Counters totalCounters =
      Counters.fromEscapedCompactString(job.get(Keys.COUNTERS));
  // Killed jobs might not have counters
  if (totalCounters == null) {
    return;
  }
  buff.append("\nCounters: \n\n");
  buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|",
      "Group Name",
      "Counter name",
      "Map Value",
      "Reduce Value",
      "Total Value"));
  buff.append("\n------------------------------------------" +
      "---------------------------------------------");
  for (String groupName : totalCounters.getGroupNames()) {
    Group totalGroup = totalCounters.getGroup(groupName);
    Group mapGroup = mapCounters.getGroup(groupName);
    Group reduceGroup = reduceCounters.getGroup(groupName);
    Format decimal = new DecimalFormat();
    Iterator<Counter> ctrItr = totalGroup.iterator();
    while (ctrItr.hasNext()) {
      Counter counter = ctrItr.next();
      String name = counter.getDisplayName();
      String mapValue = decimal.format(mapGroup.getCounter(name));
      String reduceValue = decimal.format(reduceGroup.getCounter(name));
      String totalValue = decimal.format(counter.getValue());
      buff.append(
          String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
              totalGroup.getDisplayName(),
              counter.getDisplayName(),
              mapValue, reduceValue, totalValue));
    }
  }
}
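As a standalone illustration of the row layout produced above (it uses java.text.DecimalFormat as in the example, and assumes an English locale for the grouping separator):

Format decimal = new DecimalFormat();
String row = String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
    "FileSystemCounters", "HDFS_BYTES_READ", // sample names, not from a real job
    decimal.format(1048576L), decimal.format(0L), decimal.format(1048576L));
System.out.println(row); // each column is left-justified to a fixed width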
Example 6: printCounters

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

private void printCounters(StringBuffer buff, JobInfo job)
    throws ParseException {
  Counters mapCounters =
      Counters.fromEscapedCompactString(job.get(Keys.MAP_COUNTERS));
  Counters reduceCounters =
      Counters.fromEscapedCompactString(job.get(Keys.REDUCE_COUNTERS));
  Counters totalCounters =
      Counters.fromEscapedCompactString(job.get(Keys.COUNTERS));
  // Killed jobs might not have counters
  if (totalCounters == null) {
    return;
  }
  buff.append("\nCounters: \n\n");
  buff.append(String.format("|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s|",
      "Group Name",
      "Counter name",
      "Map Value",
      "Reduce Value",
      "Total Value"));
  buff.append("\n------------------------------------------" +
      "---------------------------------------------");
  for (String groupName : totalCounters.getGroupNames()) {
    Group totalGroup = totalCounters.getGroup(groupName);
    Group mapGroup = mapCounters.getGroup(groupName);
    Group reduceGroup = reduceCounters.getGroup(groupName);
    Format decimal = new DecimalFormat();
    Iterator<Counter> ctrItr = totalGroup.iterator();
    while (ctrItr.hasNext()) {
      Counter counter = ctrItr.next();
      String name = counter.getDisplayName();
      String mapValue = decimal.format(mapGroup.getCounter(name));
      String reduceValue = decimal.format(reduceGroup.getCounter(name));
      String totalValue = decimal.format(counter.getValue());
      buff.append(
          String.format("\n|%1$-30s|%2$-30s|%3$-10s|%4$-10s|%5$-10s",
              totalGroup.getDisplayName(),
              counter.getDisplayName(),
              mapValue, reduceValue, totalValue));
    }
  }
}
Example 7: saveCounters

import org.apache.hadoop.mapred.Counters.Group; // import the package/class this method depends on

/**
 * Reads the global counters produced by a job on the group labeled with PIG_MAP_RANK_NAME.
 * It then computes the cumulative sum: each value is the previous cumulative sum
 * plus the previous global counter value.
 * @param job the job whose global counters were collected.
 * @param operationID after being collected in global counters (POCounter),
 * these values are passed via the configuration file to PORank, using the unique
 * operation identifier
 */
private void saveCounters(Job job, String operationID) {
  Counters counters;
  Group groupCounters;
  Long previousValue = 0L;
  Long previousSum = 0L;
  ArrayList<Pair<String, Long>> counterPairs;
  try {
    counters = HadoopShims.getCounters(job);
    String groupName = getGroupName(counters.getGroupNames());
    // If the counter group was not found, we need to find out why. The only
    // acceptable cause is that the relation was empty.
    if (groupName == null) {
      Counter outputRecords =
          counters.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP)
              .getCounterForName(MRPigStatsUtil.MAP_OUTPUT_RECORDS);
      if (outputRecords.getCounter() == 0) {
        globalCounters.put(operationID, new ArrayList<Pair<String, Long>>());
        return;
      } else {
        throw new RuntimeException(
            "Did not find the RANK counter group for operationId: " + operationID);
      }
    }
    groupCounters = counters.getGroup(groupName);
    // collect the counters keyed by task index (the display name is the index)
    Iterator<Counter> it = groupCounters.iterator();
    HashMap<Integer, Long> counterList = new HashMap<Integer, Long>();
    while (it.hasNext()) {
      try {
        Counter c = it.next();
        counterList.put(Integer.valueOf(c.getDisplayName()), c.getValue());
      } catch (Exception ex) {
        ex.printStackTrace();
      }
    }
    counterSize = counterList.size();
    counterPairs = new ArrayList<Pair<String, Long>>();
    for (int i = 0; i < counterSize; i++) {
      previousSum += previousValue;
      previousValue = counterList.get(Integer.valueOf(i));
      counterPairs.add(new Pair<String, Long>(
          JobControlCompiler.PIG_MAP_COUNTER + operationID
              + JobControlCompiler.PIG_MAP_SEPARATOR + i, previousSum));
    }
    globalCounters.put(operationID, counterPairs);
  } catch (Exception e) {
    String msg = "Error reading counters for Rank operation, counterSize " + counterSize;
    throw new RuntimeException(msg, e);
  }
}