This article collects typical usage examples of the Java class org.apache.hadoop.mapred.Counters.Counter. If you are unsure what the Counter class does or how to use it, the curated examples below may help.
Counter is a nested class of org.apache.hadoop.mapred.Counters (the old mapred counter API). Eleven code examples of the Counter class follow, sorted by popularity.
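Before the examples, a minimal sketch of how such a counter is typically populated and read with the old mapred API may be useful: a mapper increments a user-defined counter through the Reporter passed to map(), and the driver reads the value back from the finished job. The WordCountMapper class and the "MyCounters" / "INPUT_WORDS" names are illustrative placeholders, not identifiers used in the examples below.

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical mapper that tallies the words it sees in a custom counter.
public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {
  private static final LongWritable ONE = new LongWritable(1);

  @Override
  public void map(LongWritable key, Text value,
      OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {
    StringTokenizer tokens = new StringTokenizer(value.toString());
    while (tokens.hasMoreTokens()) {
      output.collect(new Text(tokens.nextToken()), ONE);
      // Group and counter names are arbitrary strings chosen for this sketch.
      reporter.incrCounter("MyCounters", "INPUT_WORDS", 1);
    }
  }
}

Once the job completes, the driver can read the value back through Counters.Counter (runningJob stands for the RunningJob returned by JobClient.runJob):

Counters counters = runningJob.getCounters();
long inputWords = counters.findCounter("MyCounters", "INPUT_WORDS").getCounter();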
Example 1: checkLegacyNames
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@SuppressWarnings("deprecation")
private void checkLegacyNames(Counters counters) {
  assertEquals("New name", 1, counters.findCounter(
      TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.Task$Counter",
      "MAP_INPUT_RECORDS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).getValue());
  assertEquals("New name", 1, counters.findCounter(
      JobCounter.class.getName(), "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "org.apache.hadoop.mapred.JobInProgress$Counter",
      "DATA_LOCAL_MAPS").getValue());
  assertEquals("Legacy enum", 1,
      counters.findCounter(JobInProgress.Counter.DATA_LOCAL_MAPS).getValue());
  assertEquals("New name", 1, counters.findCounter(
      FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue());
  assertEquals("New name and method", 1, counters.findCounter("file",
      FileSystemCounter.BYTES_READ).getValue());
  assertEquals("Legacy name", 1, counters.findCounter(
      "FileSystemCounters",
      "FILE_BYTES_READ").getValue());
}
Example 2: testFileSystemGroupIteratorConcurrency
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
  // Iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
Example 3: testFrameworkCounter
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@SuppressWarnings("rawtypes")
@Test
public void testFrameworkCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  FrameworkGroupFactory frameworkGroupFactory =
      groupFactory.newFrameworkGroupFactory(JobCounter.class);
  Group group = (Group) frameworkGroupFactory.newGroup("JobCounter");
  FrameworkCounterGroup counterGroup =
      (FrameworkCounterGroup) group.getUnderlyingGroup();
  org.apache.hadoop.mapreduce.Counter count1 =
      counterGroup.findCounter(JobCounter.NUM_FAILED_MAPS.toString());
  Assert.assertNotNull(count1);
  // Verify no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 =
      counterGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Example 4: testMergeShouldReturnProperProgress
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@SuppressWarnings( { "deprecation", "unchecked" })
public void testMergeShouldReturnProperProgress(
    List<Segment<Text, Text>> segments) throws IOException {
  Configuration conf = new Configuration();
  JobConf jobConf = new JobConf();
  FileSystem fs = FileSystem.getLocal(conf);
  Path tmpDir = new Path("localpath");
  Class<Text> keyClass = (Class<Text>) jobConf.getMapOutputKeyClass();
  Class<Text> valueClass = (Class<Text>) jobConf.getMapOutputValueClass();
  RawComparator<Text> comparator = jobConf.getOutputKeyComparator();
  Counter readsCounter = new Counter();
  Counter writesCounter = new Counter();
  RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass,
      valueClass, segments, 2, tmpDir, comparator, getReporter(),
      readsCounter, writesCounter);
  // Once the segments are merged, the iterator should report full progress.
  Assert.assertEquals(1.0f, mergeQueue.getProgress().get());
}
Example 5: getCounter
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
public Counters.Counter getCounter(String group, String name) {
  Counters.Counter counter = null;
  if (counters != null) {
    counter = counters.findCounter(group, name);
    if (counter == null) {
      // Counter not found: create the group and register the counter in it.
      Group grp = counters.addGroup(group, group);
      counter = grp.addCounter(name, name, 10);
    }
  }
  return counter;
}
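For context, a short hedged usage sketch of the helper above, assuming the enclosing object holds a populated Counters instance; the group and counter names are placeholders:

Counters.Counter records = getCounter("MyGroup", "RECORDS_SEEN");
if (records != null) {
  records.increment(1);
}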
Example 6: testCounterValue
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
/**
 * Verify counter value works
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int i = 0; i < NUMBER_TESTS; i++) {
    long initValue = rand.nextInt();
    long expectedValue = initValue;
    Counter counter = counters.findCounter("foo", "bar");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",
        expectedValue, counter.getValue());
    for (int j = 0; j < NUMBER_INC; j++) {
      int incValue = rand.nextInt();
      counter.increment(incValue);
      expectedValue += incValue;
      assertEquals("Counter value is not incremented correctly",
          expectedValue, counter.getValue());
    }
    expectedValue = rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",
        expectedValue, counter.getValue());
  }
}
Example 7: testWriteWithLegacyNames
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testWriteWithLegacyNames() {
  Counters counters = new Counters();
  counters.incrCounter(Task.Counter.MAP_INPUT_RECORDS, 1);
  counters.incrCounter(JobInProgress.Counter.DATA_LOCAL_MAPS, 1);
  counters.findCounter("FileSystemCounters", "FILE_BYTES_READ").increment(1);
  checkLegacyNames(counters);
}
Example 8: testGroupIteratorConcurrency
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  // Add a second counter to the group while an iterator over it is open.
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}
Example 9: testFilesystemCounter
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
@Test
public void testFilesystemCounter() {
  GroupFactory groupFactory = new GroupFactoryForTest();
  Group fsGroup = groupFactory.newFileSystemGroup();
  org.apache.hadoop.mapreduce.Counter count1 =
      fsGroup.findCounter("ANY_BYTES_READ");
  Assert.assertNotNull(count1);
  // Verify no exception is thrown when looking up an unknown counter
  org.apache.hadoop.mapreduce.Counter count2 =
      fsGroup.findCounter("Unknown");
  Assert.assertNull(count2);
}
Example 10: validateCounters
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
private void validateCounters() throws IOException {
  Counters counters = job.running_.getCounters();
  assertNotNull("Counters", counters);
  Group group = counters.getGroup("UserCounters");
  assertNotNull("Group", group);
  Counter counter = group.getCounterForName("InputLines");
  assertNotNull("Counter", counter);
  // The user-defined "InputLines" counter is expected to have counted 3 records.
  assertEquals(3, counter.getCounter());
}
Example 11: create
import org.apache.hadoop.mapred.Counters.Counter; // import the required package/class
public static <K, V> ICombineHandler create(TaskContext context)
    throws IOException, ClassNotFoundException {
  final JobConf conf = new JobConf(context.getConf());
  conf.set(Constants.SERIALIZATION_FRAMEWORK,
      String.valueOf(SerializationFramework.WRITABLE_SERIALIZATION.getType()));
  String combinerClazz = conf.get(Constants.MAPRED_COMBINER_CLASS);
  if (null == combinerClazz) {
    combinerClazz = conf.get(MRJobConfig.COMBINE_CLASS_ATTR);
  }
  if (null == combinerClazz) {
    // No combiner configured, so no combine handler is needed.
    return null;
  } else {
    LOG.info("NativeTask Combiner is enabled, class = " + combinerClazz);
  }
  final Counter combineInputCounter = context.getTaskReporter().getCounter(
      TaskCounter.COMBINE_INPUT_RECORDS);
  final CombinerRunner<K, V> combinerRunner = CombinerRunner.create(
      conf, context.getTaskAttemptId(),
      combineInputCounter, context.getTaskReporter(), null);
  final INativeHandler nativeHandler = NativeBatchProcessor.create(
      NAME, conf, DataChannel.INOUT);
  @SuppressWarnings("unchecked")
  final BufferPusher<K, V> pusher = new BufferPusher<K, V>((Class<K>) context.getInputKeyClass(),
      (Class<V>) context.getInputValueClass(),
      nativeHandler);
  final BufferPuller puller = new BufferPuller(nativeHandler);
  return new CombinerHandler<K, V>(nativeHandler, combinerRunner, puller, pusher);
}