本文整理汇总了Java中org.apache.hadoop.mapred.Counters.Group.getCounterForName方法的典型用法代码示例。如果您正苦于以下问题:Java Group.getCounterForName方法的具体用法?Java Group.getCounterForName怎么用?Java Group.getCounterForName使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapred.Counters.Group
的用法示例。
在下文中一共展示了Group.getCounterForName方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: validateCounters
import org.apache.hadoop.mapred.Counters.Group; //导入方法依赖的package包/类
/**
 * Asserts that the job's "UserCounters" group exists and that its
 * "InputLines" counter recorded exactly 3 lines.
 */
private void validateCounters() throws IOException {
    Counters jobCounters = job.running_.getCounters();
    assertNotNull("Counters", jobCounters);

    Group userCounters = jobCounters.getGroup("UserCounters");
    assertNotNull("Group", userCounters);

    Counter inputLines = userCounters.getCounterForName("InputLines");
    assertNotNull("Counter", inputLines);
    assertEquals(3, inputLines.getCounter());
}
示例2: testCommandLine
import org.apache.hadoop.mapred.Counters.Group; //导入方法依赖的package包/类
/**
 * Runs a streaming job built from command-line style arguments, then verifies
 * both the produced output file contents and the "InputLines" user counter
 * (expected value: 3). Input/output artifacts are cleaned up in the finally
 * block regardless of test outcome.
 */
public void testCommandLine() throws IOException
{
    try {
        try {
            FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
            // Output directory may not exist yet; nothing to clean up.
        }
        createInput();

        // During tests, the default Configuration will use a local mapred,
        // so there is no need to pass -config or -cluster.
        StreamJob streamJob = new StreamJob(genArgs(), /* mayExit= */ false);
        streamJob.go();

        File resultFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actualOutput = StreamUtil.slurp(resultFile);
        resultFile.delete();
        assertEquals(outputExpect, actualOutput);

        Counters jobCounters = streamJob.running_.getCounters();
        assertNotNull("Counters", jobCounters);

        Group userCounters = jobCounters.getGroup("UserCounters");
        assertNotNull("Group", userCounters);

        Counter inputLines = userCounters.getCounterForName("InputLines");
        assertNotNull("Counter", inputLines);
        assertEquals(3, inputLines.getCounter());
    } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    }
}
示例3: testCommandLine
import org.apache.hadoop.mapred.Counters.Group; //导入方法依赖的package包/类
/**
 * Runs a streaming job built from command-line style arguments, then verifies
 * both the produced output file contents and the "InputLines" user counter
 * (expected value: 3). Cleanup in the finally block also removes the CRC
 * sidecar file before deleting the output directory.
 */
public void testCommandLine() throws IOException
{
    try {
        try {
            OUTPUT_DIR.getAbsoluteFile().delete();
        } catch (Exception ignored) {
            // Output directory may not exist yet; nothing to clean up.
        }
        createInput();

        // During tests, the default Configuration will use a local mapred,
        // so there is no need to pass -config or -cluster.
        StreamJob streamJob = new StreamJob(genArgs(), /* mayExit= */ false);
        streamJob.go();

        File resultFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actualOutput = StreamUtil.slurp(resultFile);
        resultFile.delete();
        assertEquals(outputExpect, actualOutput);

        Counters jobCounters = streamJob.running_.getCounters();
        assertNotNull("Counters", jobCounters);

        Group userCounters = jobCounters.getGroup("UserCounters");
        assertNotNull("Group", userCounters);

        Counter inputLines = userCounters.getCounterForName("InputLines");
        assertNotNull("Counter", inputLines);
        assertEquals(3, inputLines.getCounter());
    } finally {
        File crcFile = new File(OUTPUT_DIR, ".part-00000.crc").getAbsoluteFile();
        INPUT_FILE.delete();
        crcFile.delete();
        OUTPUT_DIR.getAbsoluteFile().delete();
    }
}
示例4: parseAndAddJobCounters
import org.apache.hadoop.mapred.Counters.Group; //导入方法依赖的package包/类
@SuppressWarnings("deprecation")
private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
try {
Counters counterGroups = Counters.fromEscapedCompactString(counters);
for (Group otherGroup : counterGroups) {
Group group = counterGroups.getGroup(otherGroup.getName());
for (Counter otherCounter : otherGroup) {
Counter counter = group.getCounterForName(otherCounter.getName());
job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
}
}
} catch (ParseException e) {
LOG.warn("Failed to parse job counters", e);
}
}