本文整理汇总了Java中org.apache.tez.common.counters.TezCounter.increment方法的典型用法代码示例。如果您正苦于以下问题:Java TezCounter.increment方法的具体用法?Java TezCounter.increment怎么用?Java TezCounter.increment使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.tez.common.counters.TezCounter
的用法示例。
在下文中一共展示了TezCounter.increment方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: incrCounter
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
@Override
public boolean incrCounter(Enum<?> name, long delta) {
    // No task context means there is nowhere to record the value.
    if (context == null) {
        return false;
    }
    // findCounter lazily creates the counter on first use, so this is safe.
    context.getCounters().findCounter(name).increment(delta);
    return true;
}
示例2: Reader
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
/**
 * Construct an IFile Reader.
 *
 * @param in the input stream; may be null, in which case the length is used as-is
 * @param length length of the data in the stream, including the checksum bytes
 * @param codec compression codec used for the stream, or null for uncompressed data
 * @param readsCounter counter for records read from disk
 * @param bytesReadCounter counter for bytes read from disk; may be null
 * @param readAhead whether to enable read-ahead on the stream
 * @param readAheadLength number of bytes to read ahead
 * @param bufferSize size of the read buffer
 * @throws IOException if the stream cannot be examined for the compressed flag
 */
public Reader(InputStream in, long length,
    CompressionCodec codec,
    TezCounter readsCounter, TezCounter bytesReadCounter,
    boolean readAhead, int readAheadLength,
    int bufferSize) throws IOException {
    // When a stream is present, the header has already been consumed by the
    // caller: subtract it from the payload length and probe the compressed flag.
    this(in,
        (in == null) ? length : (length - HEADER.length),
        codec,
        readsCounter, bytesReadCounter, readAhead, readAheadLength,
        bufferSize,
        in != null && isCompressedFlagEnabled(in));
    if (in != null && bytesReadCounter != null) {
        // Account for the header bytes the delegated constructor did not see.
        bytesReadCounter.increment(IFile.HEADER.length);
    }
}
示例3: run
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
@Override
public void run() throws Exception {
    // Validation task: compares the key streams of two sorted inputs and
    // increments the missing-key counter for every discrepancy found.
    Preconditions.checkState(getInputs().size() == 2);
    Preconditions.checkState(getOutputs().size() == 0);
    LogicalInput lhsInput = getInputs().get(LHS_INPUT_NAME);
    LogicalInput rhsInput = getInputs().get(RHS_INPUT_NAME);
    Reader lhsReaderRaw = lhsInput.getReader();
    Reader rhsReaderRaw = rhsInput.getReader();
    Preconditions.checkState(lhsReaderRaw instanceof KeyValuesReader);
    Preconditions.checkState(rhsReaderRaw instanceof KeyValuesReader);
    KeyValuesReader lhsReader = (KeyValuesReader) lhsReaderRaw;
    KeyValuesReader rhsReader = (KeyValuesReader) rhsReaderRaw;
    TezCounter lhsMissingKeyCounter = getContext().getCounters().findCounter(COUNTER_GROUP_NAME,
        MISSING_KEY_COUNTER_NAME);
    // Track whether rhs ran out inside the loop, so we do not call next() on an
    // already-exhausted reader afterwards (which could miscount).
    boolean rhsReaderEnd = false;
    while (lhsReader.next()) {
        if (rhsReader.next()) {
            if (!lhsReader.getCurrentKey().equals(rhsReader.getCurrentKey())) {
                LOG.info("MismatchedKeys: " + "lhs=" + lhsReader.getCurrentKey() + ", rhs=" + rhsReader.getCurrentKey());
                lhsMissingKeyCounter.increment(1);
            }
        } else {
            lhsMissingKeyCounter.increment(1);
            LOG.info("ExtraKey in lhs: " + lhsReader.getClass());
            rhsReaderEnd = true;
            break;
        }
    }
    // Only probe rhs for leftovers if it was not already exhausted above.
    if (!rhsReaderEnd && rhsReader.next()) {
        lhsMissingKeyCounter.increment(1);
        // Bug fix: report the rhs reader here, not the lhs one.
        LOG.info("ExtraKey in rhs: " + rhsReader.getClass());
    }
}
示例4: getNewSplitDetailsFromDisk
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
public static org.apache.hadoop.mapreduce.InputSplit getNewSplitDetailsFromDisk(
    TaskSplitIndex splitMetaInfo, JobConf jobConf, TezCounter splitBytesCounter)
    throws IOException {
    // Reads a serialized mapreduce InputSplit back from the local split file,
    // charging the bytes consumed to splitBytesCounter when one is supplied.
    Path file = new Path(splitMetaInfo.getSplitLocation());
    long offset = splitMetaInfo.getStartOffset();
    // Split information read from local filesystem.
    FileSystem fs = FileSystem.getLocal(jobConf);
    file = fs.makeQualified(file);
    LOG.info("Reading input split file from : " + file);
    // try-with-resources: the original leaked inFile if readString/deserialize threw.
    try (FSDataInputStream inFile = fs.open(file)) {
        inFile.seek(offset);
        String className = Text.readString(inFile);
        Class<org.apache.hadoop.mapreduce.InputSplit> cls;
        try {
            cls = (Class<org.apache.hadoop.mapreduce.InputSplit>) jobConf.getClassByName(className);
        } catch (ClassNotFoundException ce) {
            // Wrap with the cause attached so the original stack is preserved.
            throw new IOException("Split class " + className + " not found", ce);
        }
        SerializationFactory factory = new SerializationFactory(jobConf);
        Deserializer<org.apache.hadoop.mapreduce.InputSplit> deserializer =
            (Deserializer<org.apache.hadoop.mapreduce.InputSplit>) factory.getDeserializer(cls);
        deserializer.open(inFile);
        org.apache.hadoop.mapreduce.InputSplit split = deserializer.deserialize(null);
        long pos = inFile.getPos();
        if (splitBytesCounter != null) {
            splitBytesCounter.increment(pos - offset);
        }
        return split;
    }
}
示例5: getOldSplitDetailsFromDisk
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
public static InputSplit getOldSplitDetailsFromDisk(TaskSplitIndex splitMetaInfo,
    JobConf jobConf, TezCounter splitBytesCounter) throws IOException {
    // Reads a serialized mapred (old API) InputSplit back from the local split
    // file, charging the bytes consumed to splitBytesCounter when supplied.
    Path file = new Path(splitMetaInfo.getSplitLocation());
    FileSystem fs = FileSystem.getLocal(jobConf);
    file = fs.makeQualified(file);
    LOG.info("Reading input split file from : " + file);
    long offset = splitMetaInfo.getStartOffset();
    // try-with-resources: the original leaked inFile if readString/deserialize threw.
    try (FSDataInputStream inFile = fs.open(file)) {
        inFile.seek(offset);
        String className = Text.readString(inFile);
        Class<org.apache.hadoop.mapred.InputSplit> cls;
        try {
            cls = (Class<org.apache.hadoop.mapred.InputSplit>) jobConf.getClassByName(className);
        } catch (ClassNotFoundException ce) {
            // Wrap with the cause attached so the original stack is preserved.
            throw new IOException("Split class " + className + " not found", ce);
        }
        SerializationFactory factory = new SerializationFactory(jobConf);
        Deserializer<org.apache.hadoop.mapred.InputSplit> deserializer =
            (Deserializer<org.apache.hadoop.mapred.InputSplit>) factory.getDeserializer(cls);
        deserializer.open(inFile);
        org.apache.hadoop.mapred.InputSplit split = deserializer.deserialize(null);
        long pos = inFile.getPos();
        if (splitBytesCounter != null) {
            splitBytesCounter.increment(pos - offset);
        }
        return split;
    }
}
示例6: incrementGcCounter
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
/**
 * Increment the gc-elapsed-time counter.
 */
void incrementGcCounter() {
    // Counters may not be wired up yet; silently skip in that case.
    if (counters == null) {
        return;
    }
    TezCounter gcCounter = counters.findCounter(TaskCounter.GC_TIME_MILLIS);
    if (gcCounter != null) {
        gcCounter.increment(getElapsedGc());
    }
}
示例7: run
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
@Override
public void run() throws Exception {
    // This vertex only validates: exactly two inputs, no outputs.
    Preconditions.checkState(getInputs().size() == 2);
    Preconditions.checkState(getOutputs().size() == 0);
    LogicalInput lhsInput = getInputs().get(LHS_INPUT_NAME);
    LogicalInput rhsInput = getInputs().get(RHS_INPUT_NAME);
    Reader rawLhs = lhsInput.getReader();
    Reader rawRhs = rhsInput.getReader();
    Preconditions.checkState(rawLhs instanceof KeyValuesReader);
    Preconditions.checkState(rawRhs instanceof KeyValuesReader);
    KeyValuesReader lhsReader = (KeyValuesReader) rawLhs;
    KeyValuesReader rhsReader = (KeyValuesReader) rawRhs;
    TezCounter mismatchCounter = getContext().getCounters()
        .findCounter(COUNTER_GROUP_NAME, MISSING_KEY_COUNTER_NAME);
    // Remembers that rhs was exhausted mid-loop, so it is not probed again below.
    boolean rhsExhausted = false;
    while (lhsReader.next()) {
        if (!rhsReader.next()) {
            // lhs still has keys but rhs is done: count and stop comparing.
            mismatchCounter.increment(1);
            LOG.info("ExtraKey in lhs: " + lhsReader.getClass());
            rhsExhausted = true;
            break;
        }
        if (!lhsReader.getCurrentKey().equals(rhsReader.getCurrentKey())) {
            LOG.info("MismatchedKeys: " + "lhs=" + lhsReader.getCurrentKey() + ", rhs=" + rhsReader.getCurrentKey());
            mismatchCounter.increment(1);
        }
    }
    // rhs having leftover keys is also a discrepancy — but only check if it
    // was not already exhausted inside the loop.
    if (!rhsExhausted && rhsReader.next()) {
        mismatchCounter.increment(1);
        LOG.info("ExtraKey in rhs: " + rhsReader.getClass());
    }
}
示例8: incrementGcCounter
import org.apache.tez.common.counters.TezCounter; //导入方法依赖的package包/类
/**
 * Increment the gc-elapsed-time counter.
 */
public void incrementGcCounter() {
    if (counters == null) {
        // Nothing to record against; counters were never initialized.
        return;
    }
    TezCounter gc = counters.findCounter(TaskCounter.GC_TIME_MILLIS);
    if (gc == null) {
        return;
    }
    gc.increment(getElapsedGc());
}