

Java Limits Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.counters.Limits. If you are wondering what the Limits class is for, how to use it, or what real-world usages look like, the curated examples below should help.


The Limits class belongs to the org.apache.hadoop.mapreduce.counters package. Eleven code examples of the class are shown below, sorted by popularity by default.
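
Before diving into the examples, here is a minimal, self-contained sketch of how Limits is typically configured and queried. It is not taken from any of the projects below; the class name LimitsQuickStart and the limit values are illustrative assumptions, and the one-shot behavior of Limits.init described in the comments assumes Hadoop 2.x semantics.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.counters.Limits;

public class LimitsQuickStart {
  public static void main(String[] args) {
    // Raise the per-job counter and counter-group limits before the static
    // Limits state is initialized (the values 200 and 100 are arbitrary).
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.COUNTERS_MAX_KEY, 200);
    conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 100);

    // Limits.init only takes effect on the first call in a JVM; later calls
    // with a different Configuration are ignored (use Limits.reset to
    // re-apply limits, as Example 5 below does).
    Limits.init(conf);

    System.out.println("counters max = " + Limits.getCountersMax());
    System.out.println("groups max   = " + Limits.getGroupsMax());
  }
}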

Example 1: init

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
/**
 * Connect to the default {@link JobTracker}.
 * @param conf the job configuration.
 * @throws IOException
 */
public void init(JobConf conf) throws IOException {
  setConf(conf);
  Limits.init(conf);
  String tracker = conf.get("mapred.job.tracker", "local");
  tasklogtimeout = conf.getInt(
    TASKLOG_PULL_TIMEOUT_KEY, DEFAULT_TASKLOG_TIMEOUT);
  this.ugi = UserGroupInformation.getCurrentUser();
  if ("local".equals(tracker)) {
    conf.setNumMapTasks(1);
    this.jobSubmitClient = new LocalJobRunner(conf);
  } else if (!HAUtil.isHAEnabled(conf, tracker)) {
    this.jobSubmitClient = createRPCProxy(JobTracker.getAddress(conf), conf);
  } else {
    this.jobSubmitClient = createRPCProxy(tracker, conf);
  }

  // Read progress monitor poll interval from config. Default is 1 second.
  this.progMonitorPollIntervalMillis = conf.getInt(PROGRESS_MONITOR_POLL_INTERVAL_KEY,
      DEFAULT_MONITOR_POLL_INTERVAL);
  if (this.progMonitorPollIntervalMillis < 1) {
    LOG.warn(PROGRESS_MONITOR_POLL_INTERVAL_KEY + " has been set to an invalid value; "
        + " replacing with " + DEFAULT_MONITOR_POLL_INTERVAL);
    this.progMonitorPollIntervalMillis = DEFAULT_MONITOR_POLL_INTERVAL;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 31, Source: JobClient.java

Example 2: testMaxCounters

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
private void testMaxCounters(final Counters counters) {
  LOG.info("counters max="+ Limits.getCountersMax());
  for (int i = 0; i < Limits.getCountersMax(); ++i) {
    counters.findCounter("test", "test"+ i);
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("test", "bad");
    }
  });
  checkExpected(counters);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestCounters.java

Example 3: testMaxGroups

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
private void testMaxGroups(final Counters counters) {
  LOG.info("counter groups max="+ Limits.getGroupsMax());
  for (int i = 0; i < Limits.getGroupsMax(); ++i) {
    // assuming COUNTERS_MAX > GROUPS_MAX
    counters.findCounter("test"+ i, "test");
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("bad", "test");
    }
  });
  checkExpected(counters);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestCounters.java
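
Examples 2 and 3 (and Examples 7 and 8 below) rely on a shouldThrow helper from TestCounters that is not reproduced on this page. The real implementation may differ; the following is only a sketch of the contract those tests assume, namely that the Runnable must throw an exception of the expected type. It uses only java.lang types, so no extra imports are needed.

// Sketch of a shouldThrow-style helper (assumed, not copied from TestCounters):
// run the body and fail unless it throws the expected exception type.
private static void shouldThrow(Class<? extends Exception> expected, Runnable body) {
  try {
    body.run();
  } catch (Exception e) {
    if (expected.isInstance(e)) {
      return; // got the expected exception, e.g. LimitExceededException
    }
    throw new AssertionError("Expected " + expected.getName()
        + " but got " + e.getClass().getName(), e);
  }
  throw new AssertionError("Expected " + expected.getName()
      + " but nothing was thrown");
}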

Example 4: testResetOnDeserialize

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
@Test public void testResetOnDeserialize() throws IOException {
  // Allow only one counterGroup
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1);
  Limits.init(conf);

  Counters countersWithOneGroup = new Counters();
  countersWithOneGroup.findCounter("firstOf1Allowed", "First group");
  boolean caughtExpectedException = false;
  try {
    countersWithOneGroup.findCounter("secondIsTooMany", "Second group");
  }
  catch (LimitExceededException _) {
    caughtExpectedException = true;
  }

  assertTrue("Did not throw expected exception",
      caughtExpectedException);

  Counters countersWithZeroGroups = new Counters();
  DataOutputBuffer out = new DataOutputBuffer();
  countersWithZeroGroups.write(out);

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());

  countersWithOneGroup.readFields(in);

  // After reset one should be able to add a group
  countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " +
      "limit should be set back to zero");
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestCounters.java

Example 5: HistoryViewer

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
/**
 * Constructs the HistoryViewer object.
 * @param historyFile the fully qualified Path of the history file
 * @param conf the job Configuration
 * @param printAll if true, print all task statuses; otherwise print only
 *                 killed/failed statuses
 * @throws IOException
 */
 public HistoryViewer(String historyFile, 
                      Configuration conf,
                      boolean printAll) throws IOException {
   this.printAll = printAll;
   String errorMsg = "Unable to initialize History Viewer";
   try {
     Path jobFile = new Path(historyFile);
     fs = jobFile.getFileSystem(conf);
     String[] jobDetails =
       jobFile.getName().split("_");
     if (jobDetails.length < 3) {
       // NOT a valid name (the conf path below needs the first three
       // "_"-separated components of the file name)
       System.err.println("Ignore unrecognized file: " + jobFile.getName());
       throw new IOException(errorMsg);
     }
     final Path jobConfPath = new Path(jobFile.getParent(),  jobDetails[0]
         + "_" + jobDetails[1] + "_" + jobDetails[2] + "_conf.xml");
     final Configuration jobConf = new Configuration(conf);
     try {
       jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
       Limits.reset(jobConf);
     } catch (FileNotFoundException fnf) {
       if (LOG.isWarnEnabled()) {
         LOG.warn("Missing job conf in history", fnf);
       }
     }
     JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
     job = parser.parse();
     jobId = job.getJobId().toString();
   } catch(Exception e) {
     throw new IOException(errorMsg, e);
   }
 }
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 41, Source: HistoryViewer.java
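
Example 5 is the one place on this page where Limits.reset is used instead of Limits.init. The distinction matters because init only applies the first Configuration it sees in a JVM, while reset re-applies the limits from the configuration of the specific job being inspected. The sketch below is illustrative only: the class name LimitsResetDemo and the value 500 are assumptions, not code from HistoryViewer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.counters.Limits;

public class LimitsResetDemo {
  public static void main(String[] args) {
    // Cluster-wide defaults: the first init wins for this JVM.
    Limits.init(new Configuration());

    // Per-job override, e.g. loaded from a job's ..._conf.xml as
    // HistoryViewer does; 500 is an arbitrary illustrative value.
    Configuration jobConf = new Configuration();
    jobConf.setInt(MRJobConfig.COUNTERS_MAX_KEY, 500);
    Limits.reset(jobConf);

    System.out.println("counters max after reset = " + Limits.getCountersMax());
  }
}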

Example 6: init

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
/**
 * Connect to the default cluster
 * @param conf the job configuration.
 * @throws IOException
 */
public void init(JobConf conf) throws IOException {
  setConf(conf);
  Limits.init(conf);
  cluster = new Cluster(conf);
  clientUgi = UserGroupInformation.getCurrentUser();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 12, Source: JobClient.java

Example 7: testMaxCounters

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
private void testMaxCounters(final Counters counters) {
  LOG.info("counters max="+ Limits.COUNTERS_MAX);
  for (int i = 0; i < Limits.COUNTERS_MAX; ++i) {
    counters.findCounter("test", "test"+ i);
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("test", "bad");
    }
  });
  checkExpected(counters);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 14, Source: TestCounters.java

Example 8: testMaxGroups

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
private void testMaxGroups(final Counters counters) {
  LOG.info("counter groups max="+ Limits.GROUPS_MAX);
  for (int i = 0; i < Limits.GROUPS_MAX; ++i) {
    // assuming COUNTERS_MAX > GROUPS_MAX
    counters.findCounter("test"+ i, "test");
  }
  setExpected(counters);
  shouldThrow(LimitExceededException.class, new Runnable() {
    public void run() {
      counters.findCounter("bad", "test");
    }
  });
  checkExpected(counters);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 15, Source: TestCounters.java

Example 9: GenericGroup

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
GenericGroup(String name, String displayName, Limits limits) {
  super(name, displayName, limits);
}
 
Developer: naver, Project: hadoop, Lines: 4, Source: Counters.java

Example 10: newGenericGroup

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
@Override
protected Group newGenericGroup(String name, String displayName,
                                Limits limits) {
  return new Group(new GenericGroup(name, displayName, limits));
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: Counters.java

Example 11: newGenericGroup

import org.apache.hadoop.mapreduce.counters.Limits; // import the required package/class
@Override
protected CounterGroup newGenericGroup(String name, String displayName,
                                       Limits limits) {
  return new GenericGroup(name, displayName, limits);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: Counters.java


Note: the org.apache.hadoop.mapreduce.counters.Limits examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.