

Java JobConf.getBoolean Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.getBoolean. If you are wondering what JobConf.getBoolean does, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.JobConf.


Fifteen code examples of JobConf.getBoolean are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
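Before the examples, a minimal sketch of the method's basic contract may help: getBoolean(name, defaultValue) parses the named property as a boolean and falls back to the supplied default when the key is absent or not a valid boolean string. The property name demo.feature.enabled below is hypothetical, chosen purely for illustration; it is not a standard Hadoop configuration key.

import org.apache.hadoop.mapred.JobConf;

public class GetBooleanDemo {
  public static void main(String[] args) {
    JobConf conf = new JobConf();

    // "demo.feature.enabled" is a made-up key for this sketch; it has not
    // been set yet, so getBoolean returns the supplied default (false).
    boolean enabled = conf.getBoolean("demo.feature.enabled", false);
    System.out.println(enabled); // false

    // setBoolean stores the flag; a subsequent getBoolean reads it back
    // and the default is ignored.
    conf.setBoolean("demo.feature.enabled", true);
    System.out.println(conf.getBoolean("demo.feature.enabled", false)); // true
  }
}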

Example 1: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/** Mapper configuration.
 * Extracts source and destination file system, as well as
 * top-level paths on source and destination directories.
 * Gets the named file systems, to be used later in map.
 */
public void configure(JobConf job)
{
  destPath = new Path(job.get(DST_DIR_LABEL, "/"));
  try {
    destFileSys = destPath.getFileSystem(job);
  } catch (IOException ex) {
    throw new RuntimeException("Unable to get the named file system.", ex);
  }
  sizeBuf = job.getInt("copy.buf.size", 128 * 1024);
  buffer = new byte[sizeBuf];
  ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false);
  preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false);
  if (preserve_status) {
    preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL));
  }
  update = job.getBoolean(Options.UPDATE.propertyname, false);
  overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false);
  skipCRCCheck = job.getBoolean(Options.SKIPCRC.propertyname, false);
  this.job = job;
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: DistCpV1.java

Example 2: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  super.configure(job);
  //Disable auto-increment of the counter. For streaming, the number of
  //processed records may differ from (be equal to or less than) the
  //number of input records.
  SkipBadRecords.setAutoIncrReducerProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);

  try {
    reduceOutFieldSeparator = job_.get("stream.reduce.output.field.separator", "\t").getBytes("UTF-8");
    reduceInputFieldSeparator = job_.get("stream.reduce.input.field.separator", "\t").getBytes("UTF-8");
    this.numOfReduceOutputKeyFields = job_.getInt("stream.num.reduce.output.key.fields", 1);
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: PipeReducer.java

Example 3: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  super.configure(job);
  //Disable auto-increment of the counter. For streaming, the number of
  //processed records may differ from (be equal to or less than) the
  //number of input records.
  SkipBadRecords.setAutoIncrMapperProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
  if (mapInputWriterClass_.getCanonicalName().equals(TextInputWriter.class.getCanonicalName())) {
    String inputFormatClassName = job.getClass("mapred.input.format.class", TextInputFormat.class).getCanonicalName();
    ignoreKey = job.getBoolean("stream.map.input.ignoreKey", 
      inputFormatClassName.equals(TextInputFormat.class.getCanonicalName()));
  }
  
  try {
    mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t").getBytes("UTF-8");
    mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t").getBytes("UTF-8");
    numOfMapOutputKeyFields = job.getInt("stream.num.map.output.key.fields", 1);
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: PipeMapper.java

Example 4: testSetReducerWithReducerByValueAsTrue

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test
public void testSetReducerWithReducerByValueAsTrue() throws Exception {

  JobConf jobConf = new JobConf();
  JobConf reducerConf = new JobConf();
  Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
      Object.class, Object.class, true, reducerConf);
  boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
      false);
  Assert.assertEquals("It should set chain.reducer.byValue as true "
      + "in reducerConf when we give value as true", true, reduceByValue);
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestChain.java

Example 5: testSetReducerWithReducerByValueAsFalse

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test
public void testSetReducerWithReducerByValueAsFalse() throws Exception {

  JobConf jobConf = new JobConf();
  JobConf reducerConf = new JobConf();
  Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
      Object.class, Object.class, false, reducerConf);
  boolean reduceByValue = reducerConf.getBoolean("chain.reducer.byValue",
      true);
  Assert.assertEquals("It should set chain.reducer.byValue as false "
      + "in reducerConf when we give value as false", false, reduceByValue);
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestChain.java

Example 6: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void configure(JobConf job) {
  this.job = job;
  //Disable auto-increment of the counter. For pipes, the number of processed
  //records may differ from (be equal to or less than) the number of input records.
  SkipBadRecords.setAutoIncrReducerProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: PipesReducer.java

Example 7: ShuffleSchedulerImpl

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public ShuffleSchedulerImpl(JobConf job, TaskStatus status,
                        TaskAttemptID reduceId,
                        ExceptionReporter reporter,
                        Progress progress,
                        Counters.Counter shuffledMapsCounter,
                        Counters.Counter reduceShuffleBytes,
                        Counters.Counter failedShuffleCounter) {
  totalMaps = job.getNumMapTasks();
  abortFailureLimit = Math.max(30, totalMaps / 10);
  copyTimeTracker = new CopyTimeTracker();
  remainingMaps = totalMaps;
  finishedMaps = new boolean[remainingMaps];
  this.reporter = reporter;
  this.status = status;
  this.reduceId = reduceId;
  this.progress = progress;
  this.shuffledMapsCounter = shuffledMapsCounter;
  this.reduceShuffleBytes = reduceShuffleBytes;
  this.failedShuffleCounter = failedShuffleCounter;
  this.startTime = Time.monotonicNow();
  lastProgressTime = startTime;
  referee.start();
  this.maxFailedUniqueFetches = Math.min(totalMaps, 5);
  this.maxFetchFailuresBeforeReporting = job.getInt(
      MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
  this.reportReadErrorImmediately = job.getBoolean(
      MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);

  this.maxDelay = job.getLong(MRJobConfig.MAX_SHUFFLE_FETCH_RETRY_DELAY,
      MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY);
  this.maxHostFailures = job.getInt(
      MRJobConfig.MAX_SHUFFLE_FETCH_HOST_FAILURES,
      MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_HOST_FAILURES);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: ShuffleSchedulerImpl.java

Example 8: HiveReaderSetting

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public HiveReaderSetting( final FileSplit split, final JobConf job ){
  config = new Configuration();

  disableSkipBlock = job.getBoolean( "mds.disable.block.skip" , false );
  disableFilterPushdown = job.getBoolean( "mds.disable.filter.pushdown" , false );

  Set<String> pathNameSet = createPathSet( split.getPath() );
  List<ExprNodeGenericFuncDesc> filterExprs = new ArrayList<ExprNodeGenericFuncDesc>();
  String filterExprSerialized = job.get( TableScanDesc.FILTER_EXPR_CONF_STR );
  if( filterExprSerialized != null ){
    filterExprs.add( SerializationUtilities.deserializeExpression(filterExprSerialized) );
  }

  MapWork mapWork;
  try{
    mapWork = Utilities.getMapWork(job);
  }catch( Exception e ){
    mapWork = null;
  }

  if( mapWork == null ){
    node = createExpressionNode( filterExprs );
    isVectorModeFlag = false;
    return;
  }

  node = createExpressionNode( filterExprs );

  for( Map.Entry<String,PartitionDesc> pathsAndParts: mapWork.getPathToPartitionInfo().entrySet() ){
    if( ! pathNameSet.contains( pathsAndParts.getKey() ) ){
      continue;
    }
    Properties props = pathsAndParts.getValue().getTableDesc().getProperties();
    if( props.containsKey( "mds.expand" ) ){
      config.set( "spread.reader.expand.column" , props.getProperty( "mds.expand" ) );
    }
    if( props.containsKey( "mds.flatten" ) ){
      config.set( "spread.reader.flatten.column" , props.getProperty( "mds.flatten" ) );
    }
  }

  config.set( "spread.reader.read.column.names" , createReadColumnNames( job.get( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , null ) ) );

  // In the next Hive version, use:
  // Utilities.getUseVectorizedInputFileFormat(job)
  isVectorModeFlag = Utilities.isVectorMode( job );
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines: 48, Source: HiveReaderSetting.java

Example 9: getInputDirRecursive

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public static boolean getInputDirRecursive(JobConf job) {
  return job.getBoolean(INPUT_DIR_RECURSIVE, false);
}
 
Developer ID: Tencent, Project: angel, Lines: 4, Source: HdfsUtil.java

Example 10: TaskImpl

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public TaskImpl(JobId jobId, TaskType taskType, int partition,
    EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  this.conf = conf;
  this.clock = clock;
  this.jobFile = remoteJobConfFile;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();
  this.attempts = Collections.emptyMap();
  this.finishedAttempts = new HashSet<TaskAttemptId>(2);
  this.failedAttempts = new HashSet<TaskAttemptId>(2);
  this.inProgressAttempts = new HashSet<TaskAttemptId>(2);
  // This overridable method call is okay in a constructor because we
  //  have a convention that none of the overrides depends on any
  //  fields that need initialization.
  maxAttempts = getMaxAttempts();
  taskId = MRBuilderUtils.newTaskId(jobId, partition, taskType);
  this.partition = partition;
  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  this.credentials = credentials;
  this.jobToken = jobToken;
  this.metrics = metrics;
  this.appContext = appContext;
  this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
                                          MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  stateMachine = stateMachineFactory.make(this);

  // All the new TaskAttemptIDs are generated based on MR
  // ApplicationAttemptID so that attempts from previous lives don't
  // over-step the current one. This assumes that a task won't have more
  // than 1000 attempts in its single generation, which is very reasonable.
  nextAttemptNumber = (appAttemptId - 1) * 1000;
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TaskImpl.java

Example 11: configure

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/** {@inheritDoc} */
public void configure(JobConf job) {
  this.jobconf = job;
  ignoreFailures = job.getBoolean(Option.IGNORE_FAILURES.propertyname, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: DistCh.java

Example 12: getIsJavaRecordReader

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java RecordReader
 * @param conf the configuration to check
 * @return is it a Java RecordReader?
 */
public static boolean getIsJavaRecordReader(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_RR, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 13: getIsJavaMapper

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java Mapper.
 * @param conf the configuration to check
 * @return is it a Java Mapper?
 */
public static boolean getIsJavaMapper(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 14: getIsJavaReducer

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Check whether the job is using a Java Reducer.
 * @param conf the configuration to check
 * @return is it a Java Reducer?
 */
public static boolean getIsJavaReducer(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_REDUCE, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: Submitter.java

Example 15: getIsJavaRecordWriter

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Will the reduce use a Java RecordWriter?
 * @param conf the configuration to check
 * @return true, if the output of the job will be written by Java
 */
public static boolean getIsJavaRecordWriter(JobConf conf) {
  return conf.getBoolean(Submitter.IS_JAVA_RW, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: Submitter.java


Note: The org.apache.hadoop.mapred.JobConf.getBoolean examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.