

Java JobConf.getBoolean Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.getBoolean. If you are wondering what JobConf.getBoolean does, how to use it, or where to find examples, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.JobConf.


The following shows 15 code examples of JobConf.getBoolean, sorted by popularity by default.
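
Before diving into the examples, here is a minimal sketch (not taken from any project below) of the method's basic contract: getBoolean parses the named property as a boolean and falls back to the supplied default when the property is absent. The property names are hypothetical.

import org.apache.hadoop.mapred.JobConf;

public class GetBooleanSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    job.setBoolean("example.flag", true); // hypothetical property name

    // Present key: returns the stored value, ignoring the default.
    boolean present = job.getBoolean("example.flag", false);  // true
    // Absent key: returns the supplied default.
    boolean absent = job.getBoolean("example.missing", true); // true
    System.out.println(present + " " + absent);
  }
}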

Example 1: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/** Mapper configuration.
 * Extracts source and destination file system, as well as
 * top-level paths on source and destination directories.
 * Gets the named file systems, to be used later in map.
 */
public void configure(JobConf job)
{
  destPath = new Path(job.get(DST_DIR_LABEL, "/"));
  try {
    destFileSys = destPath.getFileSystem(job);
  } catch (IOException ex) {
    throw new RuntimeException("Unable to get the named file system.", ex);
  }
  sizeBuf = job.getInt("copy.buf.size", 128 * 1024);
  buffer = new byte[sizeBuf];
  ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false);
  preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false);
  if (preserve_status) {
    preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL));
  }
  update = job.getBoolean(Options.UPDATE.propertyname, false);
  overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false);
  skipCRCCheck = job.getBoolean(Options.SKIPCRC.propertyname, false);
  this.job = job;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: DistCpV1.java
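
The flags read in configure() above are expected to have been written into the JobConf by the submitting driver. A minimal sketch of that submit-side half; the "distcp.example.*" keys are hypothetical stand-ins for the Options.*.propertyname values, which are internal to DistCpV1:

JobConf job = new JobConf();
// Hypothetical keys; DistCpV1 derives the real ones from its Options enum.
job.setBoolean("distcp.example.ignore.read.failures", true);
job.setBoolean("distcp.example.overwrite", false);
// ...submit the job; each mapper's configure(JobConf) reads the flags back.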

Example 2: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  super.configure(job);
  // Disable the auto-increment of the counter. For streaming, the number of
  // processed records can differ from (be equal to or less than) the number
  // of input records.
  SkipBadRecords.setAutoIncrReducerProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);

  try {
    reduceOutFieldSeparator = job_.get("stream.reduce.output.field.separator", "\t").getBytes("UTF-8");
    reduceInputFieldSeparator = job_.get("stream.reduce.input.field.separator", "\t").getBytes("UTF-8");
    this.numOfReduceOutputKeyFields = job_.getInt("stream.num.reduce.output.key.fields", 1);
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: PipeReducer.java

Example 3: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  super.configure(job);
  // Disable the auto-increment of the counter. For streaming, the number of
  // processed records can differ from (be equal to or less than) the number
  // of input records.
  SkipBadRecords.setAutoIncrMapperProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
  if (mapInputWriterClass_.getCanonicalName().equals(TextInputWriter.class.getCanonicalName())) {
    String inputFormatClassName = job.getClass("mapred.input.format.class", TextInputFormat.class).getCanonicalName();
    ignoreKey = job.getBoolean("stream.map.input.ignoreKey", 
      inputFormatClassName.equals(TextInputFormat.class.getCanonicalName()));
  }
  
  try {
    mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t").getBytes("UTF-8");
    mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t").getBytes("UTF-8");
    numOfMapOutputKeyFields = job.getInt("stream.num.map.output.key.fields", 1);
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: PipeMapper.java
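
Examples 2 and 3 both disable the auto-incremented processed-record counter before checking MRJobConfig.SKIP_RECORDS. Whether skipping is active is decided on the submit side; a hedged sketch of enabling it via SkipBadRecords (the limits are arbitrary example values):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SkipBadRecords;

public static void enableSkipMode(JobConf job) {
  // Tolerate up to 100 bad records per mapper and 10 bad key groups per
  // reducer before failing the task (example values).
  SkipBadRecords.setMapperMaxSkipRecords(job, 100L);
  SkipBadRecords.setReducerMaxSkipGroups(job, 10L);
  // Start skipping after the second failed task attempt.
  SkipBadRecords.setAttemptsToStartSkipping(job, 2);
}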

Example 4: testSetReducerWithReducerByValueAsTrue

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test
public void testSetReducerWithReducerByValueAsTrue() throws Exception {

  JobConf jobConf = new JobConf();
  JobConf reducerConf = new JobConf();
  Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
      Object.class, Object.class, true, reducerConf);
  boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
      false);
  Assert.assertEquals("It should set chain.reducer.byValue as true "
      + "in reducerConf when we give value as true", true, reduceByValue);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestChain.java

Example 5: testSetReducerWithReducerByValueAsFalse

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test
public void testSetReducerWithReducerByValueAsFalse() throws Exception {

  JobConf jobConf = new JobConf();
  JobConf reducerConf = new JobConf();
  Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
      Object.class, Object.class, false, reducerConf);
  boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
      true);
  Assert.assertEquals("It should set chain.reducer.byValue as false "
      + "in reducerConf when we give value as false", false, reduceByValue);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestChain.java
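
Note the testing idiom in Examples 4 and 5: each assertion passes the opposite of the expected value as getBoolean's default, so a missing chain.reducer.byValue key would make the test fail instead of passing by accident. The same trick works for any boolean property; a generic sketch ("my.flag" and expected are illustrative):

// Probing with the opposite of the expected value exposes a missing key:
// if "my.flag" was never written, getBoolean returns the default we passed.
boolean flag = conf.getBoolean("my.flag", !expected);
Assert.assertEquals(expected, flag);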

Example 6: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  this.job = job;
  // Disable the auto-increment of the counter. For pipes, the number of processed
  // records can differ from (be equal to or less than) the number of input records.
  SkipBadRecords.setAutoIncrReducerProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: PipesReducer.java

Example 7: ShuffleSchedulerImpl

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public ShuffleSchedulerImpl(JobConf job, TaskStatus status,
                        TaskAttemptID reduceId,
                        ExceptionReporter reporter,
                        Progress progress,
                        Counters.Counter shuffledMapsCounter,
                        Counters.Counter reduceShuffleBytes,
                        Counters.Counter failedShuffleCounter) {
  totalMaps = job.getNumMapTasks();
  abortFailureLimit = Math.max(30, totalMaps / 10);
  copyTimeTracker = new CopyTimeTracker();
  remainingMaps = totalMaps;
  finishedMaps = new boolean[remainingMaps];
  this.reporter = reporter;
  this.status = status;
  this.reduceId = reduceId;
  this.progress = progress;
  this.shuffledMapsCounter = shuffledMapsCounter;
  this.reduceShuffleBytes = reduceShuffleBytes;
  this.failedShuffleCounter = failedShuffleCounter;
  this.startTime = Time.monotonicNow();
  lastProgressTime = startTime;
  referee.start();
  this.maxFailedUniqueFetches = Math.min(totalMaps, 5);
  this.maxFetchFailuresBeforeReporting = job.getInt(
      MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
  this.reportReadErrorImmediately = job.getBoolean(
      MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);

  this.maxDelay = job.getLong(MRJobConfig.MAX_SHUFFLE_FETCH_RETRY_DELAY,
      MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY);
  this.maxHostFailures = job.getInt(
      MRJobConfig.MAX_SHUFFLE_FETCH_HOST_FAILURES,
      MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_HOST_FAILURES);
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: ShuffleSchedulerImpl.java
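
The shuffle knobs read in this constructor are ordinary job properties, so a driver can override them before submission. A hedged sketch (the values are illustrative, not recommendations):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

JobConf job = new JobConf();
// Report fetch read errors only after the retry limit instead of immediately.
job.setBoolean(MRJobConfig.SHUFFLE_NOTIFY_READERROR, false);
// Allow more fetch failures per map before reporting (example value).
job.setInt(MRJobConfig.SHUFFLE_FETCH_FAILURES, 20);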

Example 8: HiveReaderSetting

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public HiveReaderSetting( final FileSplit split, final JobConf job ){
  config = new Configuration();

  disableSkipBlock = job.getBoolean( "mds.disable.block.skip" , false );
  disableFilterPushdown = job.getBoolean( "mds.disable.filter.pushdown" , false );

  Set<String> pathNameSet = createPathSet( split.getPath() );
  List<ExprNodeGenericFuncDesc> filterExprs = new ArrayList<ExprNodeGenericFuncDesc>();
  String filterExprSerialized = job.get( TableScanDesc.FILTER_EXPR_CONF_STR );
  if( filterExprSerialized != null ){
    filterExprs.add( SerializationUtilities.deserializeExpression(filterExprSerialized) );
  }

  MapWork mapWork;
  try{
    mapWork = Utilities.getMapWork(job);
  }catch( Exception e ){
    mapWork = null;
  }

  if( mapWork == null ){
    node = createExpressionNode( filterExprs );
    isVectorModeFlag = false;
    return;
  }

  node = createExpressionNode( filterExprs );

  for( Map.Entry<String,PartitionDesc> pathsAndParts: mapWork.getPathToPartitionInfo().entrySet() ){
    if( ! pathNameSet.contains( pathsAndParts.getKey() ) ){
      continue;
    }
    Properties props = pathsAndParts.getValue().getTableDesc().getProperties();
    if( props.containsKey( "mds.expand" ) ){
      config.set( "spread.reader.expand.column" , props.getProperty( "mds.expand" ) );
    }
    if( props.containsKey( "mds.flatten" ) ){
      config.set( "spread.reader.flatten.column" , props.getProperty( "mds.flatten" ) );
    }
  }

  config.set( "spread.reader.read.column.names" , createReadColumnNames( job.get( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , null ) ) );

  // Next Hive version:
  // Utilities.getUseVectorizedInputFileFormat(job)
  isVectorModeFlag = Utilities.isVectorMode( job );
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 48, Source: HiveReaderSetting.java
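
The two mds.* switches read at the top of the constructor come straight from the job configuration, so they can be toggled when setting up the Hive job. A minimal sketch using the key strings shown above (the values are illustrative):

JobConf job = new JobConf();
job.setBoolean("mds.disable.block.skip", true);      // turn block skipping off
job.setBoolean("mds.disable.filter.pushdown", true); // turn filter pushdown off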

Example 9: getInputDirRecursive

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public static boolean getInputDirRecursive(JobConf job) {
  return job.getBoolean(INPUT_DIR_RECURSIVE, false);
}
 
Developer: Tencent, Project: angel, Lines: 4, Source: HdfsUtil.java
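
Helpers like this usually come in pairs. A sketch of the matching setter, assuming the same INPUT_DIR_RECURSIVE constant the getter reads (the setter is not shown in the source above):

public static void setInputDirRecursive(JobConf job, boolean inputDirRecursive) {
  job.setBoolean(INPUT_DIR_RECURSIVE, inputDirRecursive);
}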

Example 10: TaskImpl

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public TaskImpl(JobId jobId, TaskType taskType, int partition,
    EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  this.conf = conf;
  this.clock = clock;
  this.jobFile = remoteJobConfFile;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();
  this.attempts = Collections.emptyMap();
  this.finishedAttempts = new HashSet<TaskAttemptId>(2);
  this.failedAttempts = new HashSet<TaskAttemptId>(2);
  this.inProgressAttempts = new HashSet<TaskAttemptId>(2);
  // This overridable method call is okay in a constructor because we
  //  have a convention that none of the overrides depends on any
  //  fields that need initialization.
  maxAttempts = getMaxAttempts();
  taskId = MRBuilderUtils.newTaskId(jobId, partition, taskType);
  this.partition = partition;
  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  this.credentials = credentials;
  this.jobToken = jobToken;
  this.metrics = metrics;
  this.appContext = appContext;
  this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
                                          MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  stateMachine = stateMachineFactory.make(this);

  // All the new TaskAttemptIDs are generated based on MR
  // ApplicationAttemptID so that attempts from previous lives don't
  // over-step the current one. This assumes that a task won't have more
  // than 1000 attempts in its single generation, which is very reasonable.
  nextAttemptNumber = (appAttemptId - 1) * 1000;
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TaskImpl.java
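
The encrypted-shuffle flag read in this constructor is an ordinary configuration property; a hedged sketch of flipping it on in a job configuration (the cluster must also be provisioned for SSL, which this sketch does not cover):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;

JobConf conf = new JobConf();
// Default is MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT (false).
conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);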

Example 11: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/** {@inheritDoc} */
public void configure(JobConf job) {
  this.jobconf = job;
  ignoreFailures = job.getBoolean(Option.IGNORE_FAILURES.propertyname, false);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: DistCh.java

Example 12: getIsJavaRecordReader

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java RecordReader
 * @param conf the configuration to check
 * @return is it a Java RecordReader?
 */
public static boolean getIsJavaRecordReader(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_RR, false);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 13: getIsJavaMapper

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java Mapper.
 * @param conf the configuration to check
 * @return is it a Java Mapper?
 */
public static boolean getIsJavaMapper(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 14: getIsJavaReducer

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java Reducer.
 * @param conf the configuration to check
 * @return is it a Java Reducer?
 */
public static boolean getIsJavaReducer(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_REDUCE, false);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 15: getIsJavaRecordWriter

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Will the reduce use a Java RecordWriter?
 * @param conf the configuration to check
 * @return true, if the output of the job will be written by Java
 */
public static boolean getIsJavaRecordWriter(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_RW, false);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: Submitter.java
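
Examples 12 through 15 are the read side of Submitter's four is-Java flags. Submitter also exposes the corresponding setters; a sketch of a Pipes job that keeps Java record handling but runs the mapper and reducer in the pipes executable:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.pipes.Submitter;

JobConf conf = new JobConf();
Submitter.setIsJavaRecordReader(conf, true);  // Java RecordReader feeds the pipes mapper
Submitter.setIsJavaRecordWriter(conf, true);  // Java RecordWriter handles job output
Submitter.setIsJavaMapper(conf, false);       // mapper runs in the pipes executable
Submitter.setIsJavaReducer(conf, false);      // reducer runs in the pipes executable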


Note: The org.apache.hadoop.mapred.JobConf.getBoolean method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.