本文整理汇总了Java中org.apache.hadoop.mapreduce.counters.Limits类的典型用法代码示例。如果您正苦于以下问题:Java Limits类的具体用法?Java Limits怎么用?Java Limits使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Limits类属于org.apache.hadoop.mapreduce.counters包,在下文中一共展示了Limits类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: init
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
/**
 * Connect to the default {@link JobTracker}.
 * @param conf the job configuration.
 * @throws IOException if the RPC proxy to the tracker cannot be created.
 */
public void init(JobConf conf) throws IOException {
  setConf(conf);
  // Counter/group limits must be initialized from this job's configuration
  // before any counters are created.
  Limits.init(conf);
  final String trackerAddress = conf.get("mapred.job.tracker", "local");
  tasklogtimeout = conf.getInt(TASKLOG_PULL_TIMEOUT_KEY, DEFAULT_TASKLOG_TIMEOUT);
  this.ugi = UserGroupInformation.getCurrentUser();
  if ("local".equals(trackerAddress)) {
    // Local mode: everything runs in-process with a single map task.
    conf.setNumMapTasks(1);
    this.jobSubmitClient = new LocalJobRunner(conf);
  } else if (HAUtil.isHAEnabled(conf, trackerAddress)) {
    // HA setup: the proxy resolves the active tracker from the logical name.
    this.jobSubmitClient = createRPCProxy(trackerAddress, conf);
  } else {
    this.jobSubmitClient = createRPCProxy(JobTracker.getAddress(conf), conf);
  }
  // Progress-monitor poll interval; anything below 1 ms is rejected and
  // replaced with the default (1 second).
  this.progMonitorPollIntervalMillis =
      conf.getInt(PROGRESS_MONITOR_POLL_INTERVAL_KEY, DEFAULT_MONITOR_POLL_INTERVAL);
  if (this.progMonitorPollIntervalMillis < 1) {
    LOG.warn(PROGRESS_MONITOR_POLL_INTERVAL_KEY + " has been set to an invalid value; "
        + " replacing with " + DEFAULT_MONITOR_POLL_INTERVAL);
    this.progMonitorPollIntervalMillis = DEFAULT_MONITOR_POLL_INTERVAL;
  }
}
示例2: testMaxCounters
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Fills the counters object up to the configured per-job counter limit,
// then verifies that creating one more counter raises LimitExceededException
// and that the already-created counters survive intact.
private void testMaxCounters(final Counters counters) {
  final int max = Limits.getCountersMax();
  LOG.info("counters max=" + max);
  for (int n = 0; n < max; ++n) {
    counters.findCounter("test", "test" + n);
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    @Override
    public void run() {
      // One counter past the limit must be rejected.
      counters.findCounter("test", "bad");
    }
  });
  checkExpected(counters);
}
示例3: testMaxGroups
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Fills the counters object up to the configured counter-group limit,
// then verifies that adding a counter in a brand-new group raises
// LimitExceededException while existing groups remain untouched.
private void testMaxGroups(final Counters counters) {
  final int max = Limits.getGroupsMax();
  LOG.info("counter groups max=" + max);
  for (int n = 0; n < max; ++n) {
    // assuming COUNTERS_MAX > GROUPS_MAX
    counters.findCounter("test" + n, "test");
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    @Override
    public void run() {
      // A counter in a new group past the limit must be rejected.
      counters.findCounter("bad", "test");
    }
  });
  checkExpected(counters);
}
示例4: testResetOnDeserialize
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
@Test public void testResetOnDeserialize() throws IOException {
// Allow only one counterGroup
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1);
Limits.init(conf);
Counters countersWithOneGroup = new Counters();
countersWithOneGroup.findCounter("firstOf1Allowed", "First group");
boolean caughtExpectedException = false;
try {
countersWithOneGroup.findCounter("secondIsTooMany", "Second group");
}
catch (LimitExceededException _) {
caughtExpectedException = true;
}
assertTrue("Did not throw expected exception",
caughtExpectedException);
Counters countersWithZeroGroups = new Counters();
DataOutputBuffer out = new DataOutputBuffer();
countersWithZeroGroups.write(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
countersWithOneGroup.readFields(in);
// After reset one should be able to add a group
countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " +
"limit should be set back to zero");
}
示例5: HistoryViewer
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
/**
 * Constructs the HistoryViewer object
 * @param historyFile The fully qualified Path of the History File
 * @param conf The Configuration file
 * @param printAll Toggle to print all status or only killed/failed status
 * @throws IOException if the history file cannot be opened or parsed.
 */
public HistoryViewer(String historyFile,
    Configuration conf,
    boolean printAll) throws IOException {
  this.printAll = printAll;
  String errorMsg = "Unable to initialize History Viewer";
  try {
    Path jobFile = new Path(historyFile);
    fs = jobFile.getFileSystem(conf);
    String[] jobDetails =
        jobFile.getName().split("_");
    // FIX: the name must have at least THREE '_'-separated tokens — the
    // original check (< 2) let two-token names through and the
    // jobDetails[2] access below then failed with an
    // ArrayIndexOutOfBoundsException instead of the intended
    // "unrecognized file" diagnostic.
    if (jobDetails.length < 3) {
      // NOT a valid name
      System.err.println("Ignore unrecognized file: " + jobFile.getName());
      throw new IOException(errorMsg);
    }
    // The job conf lives next to the history file as <jobid>_conf.xml.
    final Path jobConfPath = new Path(jobFile.getParent(), jobDetails[0]
        + "_" + jobDetails[1] + "_" + jobDetails[2] + "_conf.xml");
    final Configuration jobConf = new Configuration(conf);
    try {
      jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
      // Re-apply the counter limits that were in force for THIS job, so
      // parsing its counters uses the job's own limits, not ours.
      Limits.reset(jobConf);
    } catch (FileNotFoundException fnf) {
      // Best-effort: a missing job conf is tolerated, we just keep the
      // current limits.
      if (LOG.isWarnEnabled()) {
        LOG.warn("Missing job conf in history", fnf);
      }
    }
    JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
    job = parser.parse();
    jobId = job.getJobId().toString();
  } catch(Exception e) {
    // Boundary catch: wrap anything (parse errors, bad paths) uniformly.
    throw new IOException(errorMsg, e);
  }
}
示例6: init
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
/**
 * Connect to the default cluster
 * @param conf the job configuration.
 * @throws IOException if the cluster connection cannot be established.
 */
public void init(JobConf conf) throws IOException {
  setConf(conf);
  // Counter/group limits come from this job's configuration and must be
  // set up before any counters are created.
  Limits.init(conf);
  cluster = new Cluster(conf);
  clientUgi = UserGroupInformation.getCurrentUser();
}
示例7: testMaxCounters
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Creates exactly COUNTERS_MAX counters, then checks that the next
// findCounter call throws LimitExceededException and that the counters
// created so far are unchanged afterwards.
private void testMaxCounters(final Counters counters) {
  final int limit = Limits.COUNTERS_MAX;
  LOG.info("counters max=" + limit);
  for (int idx = 0; idx < limit; ++idx) {
    counters.findCounter("test", "test" + idx);
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    @Override
    public void run() {
      // One over the limit — must throw.
      counters.findCounter("test", "bad");
    }
  });
  checkExpected(counters);
}
示例8: testMaxGroups
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Creates exactly GROUPS_MAX counter groups, then checks that a counter in
// one more group throws LimitExceededException while the existing state is
// preserved.
private void testMaxGroups(final Counters counters) {
  final int limit = Limits.GROUPS_MAX;
  LOG.info("counter groups max=" + limit);
  for (int idx = 0; idx < limit; ++idx) {
    // assuming COUNTERS_MAX > GROUPS_MAX
    counters.findCounter("test" + idx, "test");
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    @Override
    public void run() {
      // A new group over the limit — must throw.
      counters.findCounter("bad", "test");
    }
  });
  checkExpected(counters);
}
示例9: GenericGroup
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Pass-through constructor: delegates directly to the superclass, which
// owns the group's name/display-name state and enforces the given Limits.
GenericGroup(String name, String displayName, Limits limits) {
super(name, displayName, limits);
}
示例10: newGenericGroup
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Factory hook for dynamically named counter groups: builds the generic
// group implementation and adapts it to the old-API Group wrapper that
// callers of this class expect.
@Override
protected Group newGenericGroup(String name, String displayName,
    Limits limits) {
  GenericGroup inner = new GenericGroup(name, displayName, limits);
  return new Group(inner);
}
示例11: newGenericGroup
import org.apache.hadoop.mapreduce.counters.Limits; //导入依赖的package包/类
// Factory hook for dynamically named counter groups: any group that is not
// a known framework/filesystem group gets the generic implementation,
// subject to the supplied Limits.
@Override
protected CounterGroup newGenericGroup(String name, String displayName,
    Limits limits) {
  GenericGroup group = new GenericGroup(name, displayName, limits);
  return group;
}