

Java JobConf.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.get. If you are wondering how JobConf.get is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.JobConf.


The following presents 15 code examples of the JobConf.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
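
Before diving into the examples, here is a minimal, illustrative sketch of the basic JobConf.get pattern they all share: read a configuration value by key, fall back to a default where appropriate, and validate required keys before use. The key names and class below are hypothetical and not taken from any of the projects listed.

import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;

public class JobConfGetSketch {
  // Hypothetical keys, for illustration only.
  private static final String OPTIONAL_KEY = "my.example.optional.key";
  private static final String REQUIRED_KEY = "my.example.required.key";

  public static String readSettings(JobConf conf) throws IOException {
    // get(key, default) never returns null; get(key) returns null when the key is unset.
    String optional = conf.get(OPTIONAL_KEY, "default-value");
    String required = conf.get(REQUIRED_KEY);
    if (required == null) {
      throw new IOException("Missing required configuration: " + REQUIRED_KEY);
    }
    return optional + ":" + required;
  }
}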

Example 1: configure

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
@Override // MapReduceBase
public void configure(JobConf conf) {
  try {
    config = new ConfigExtractor(conf);
    ConfigExtractor.dumpOptions(config);
    filesystem = config.getBaseDirectory().getFileSystem(conf);
  } catch (Exception e) {
    LOG.error("Unable to setup slive " + StringUtils.stringifyException(e));
    throw new RuntimeException("Unable to setup slive configuration", e);
  }
  if (conf.get(MRJobConfig.TASK_ATTEMPT_ID) != null) {
    this.taskId = TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
      .getTaskID().getId();
  } else {
    // So that branch-1/0.20 can run this same code as well
    this.taskId = TaskAttemptID.forName(conf.get("mapred.task.id"))
        .getTaskID().getId();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: SliveMapper.java

Example 2: finalize

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
static private void finalize(Configuration conf, JobConf jobconf,
    final Path destPath, String presevedAttributes) throws IOException {
  if (presevedAttributes == null) {
    return;
  }
  EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes);
  if (!preseved.contains(FileAttribute.USER)
      && !preseved.contains(FileAttribute.GROUP)
      && !preseved.contains(FileAttribute.PERMISSION)) {
    return;
  }

  FileSystem dstfs = destPath.getFileSystem(conf);
  Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL));
  try (SequenceFile.Reader in =
      new SequenceFile.Reader(jobconf, Reader.file(dstdirlist))) {
    Text dsttext = new Text();
    FilePair pair = new FilePair(); 
    for(; in.next(dsttext, pair); ) {
      Path absdst = new Path(destPath, pair.output);
      updateDestStatus(pair.input, dstfs.getFileStatus(absdst),
          preseved, dstfs);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: DistCpV1.java

Example 3: validateInput

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public void validateInput(JobConf job) throws IOException {
  // expecting exactly one path
  Path [] tableNames = FileInputFormat.getInputPaths(job);
  if (tableNames == null || tableNames.length > 1) {
    throw new IOException("expecting one table name");
  }

  // connected to table?
  if (getHTable() == null) {
    throw new IOException("could not connect to table '" +
      tableNames[0].getName() + "'");
  }

  // expecting at least one column
  String colArg = job.get(COLUMN_LIST);
  if (colArg == null || colArg.length() == 0) {
    throw new IOException("expecting at least one column");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TableInputFormat.java

Example 4: getInputPaths

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public static Path[] getInputPaths(JobConf context) {
  String dirs = context.get(INPUT_DIR, "");
  LOG.info("dirs=" + dirs);
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
 
Developer ID: Tencent, Project: angel, Lines of code: 11, Source: HdfsUtil.java

Example 5: checkOutputSpecs

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job)
throws FileAlreadyExistsException, InvalidJobConfException, IOException {
  String tableName = job.get(OUTPUT_TABLE);
  if (tableName == null) {
    throw new IOException("Must specify table name");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source: TableOutputFormat.java

Example 6: configure

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public void configure(JobConf job) {
  String userTypeName = job.get(USER_TYPE_NAME_KEY);
  if (null == userTypeName) {
    throw new RuntimeException("Unconfigured parameter: "
        + USER_TYPE_NAME_KEY);
  }

  LOG.info("User type name set to " + userTypeName);

  this.userRecord = null;

  try {
    Configuration conf = new Configuration();
    Class userClass = Class.forName(userTypeName, true,
        Thread.currentThread().getContextClassLoader());
    this.userRecord =
        (SqoopRecord) ReflectionUtils.newInstance(userClass, conf);
  } catch (ClassNotFoundException cnfe) {
    // handled by the next block.
    LOG.error("ClassNotFound exception: " + cnfe.toString());
  } catch (Exception e) {
    LOG.error("Got an exception reflecting user class: " + e.toString());
  }

  if (null == this.userRecord) {
    LOG.error("Could not instantiate user record of type " + userTypeName);
    throw new RuntimeException("Could not instantiate user record of type "
        + userTypeName);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 31, Source: ReparseMapper.java

Example 7: getPipeCommand

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
String getPipeCommand(JobConf job) {
  String str = job.get("stream.combine.streamprocessor");
  try {
    if (str != null) {
      return URLDecoder.decode(str, "UTF-8");
    }
  } catch (UnsupportedEncodingException e) {
    System.err.println("stream.combine.streamprocessor" + 
                       " in jobconf not found");
  }
  return null;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: PipeCombiner.java

Example 8: initialize

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
@Override
protected void initialize(JobConf job) throws IOException {
  Path[] tableNames = FileInputFormat.getInputPaths(job);
  String colArg = job.get(COLUMN_LIST);
  String[] colNames = colArg.split(" ");
  byte [][] m_cols = new byte[colNames.length][];
  for (int i = 0; i < m_cols.length; i++) {
    m_cols[i] = Bytes.toBytes(colNames[i]);
  }
  setInputColumns(m_cols);
  Connection connection = ConnectionFactory.createConnection(job);
  initializeTable(connection, TableName.valueOf(tableNames[0].getName()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: TableInputFormat.java

Example 9: getPipeCommand

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
String getPipeCommand(JobConf job) {
  String str = job.get("stream.map.streamprocessor");
  if (str == null) {
    return str;
  }
  try {
    return URLDecoder.decode(str, "UTF-8");
  }
  catch (UnsupportedEncodingException e) {
    System.err.println("stream.map.streamprocessor in jobconf not found");
    return null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source: PipeMapper.java

Example 10: configure

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public void configure(JobConf jconf) {
  conf = jconf;
  try {
    // read the cached files (unzipped, unjarred and text)
    // and put it into a single file TEST_ROOT_DIR/test.txt
    String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
    Path file = new Path("file:///", TEST_ROOT_DIR);
    FileSystem fs = FileSystem.getLocal(conf);
    if (!fs.mkdirs(file)) {
      throw new IOException("Mkdirs failed to create " + file.toString());
    }
    Path fileOut = new Path(file, "test.txt");
    fs.delete(fileOut, true);
    DataOutputStream out = fs.create(fileOut); 
    String[] symlinks = new String[6];
    symlinks[0] = ".";
    symlinks[1] = "testjar";
    symlinks[2] = "testzip";
    symlinks[3] = "testtgz";
    symlinks[4] = "testtargz";
    symlinks[5] = "testtar";

    for (int i = 0; i < symlinks.length; i++) {
      // read out the files from these archives
      File f = new File(symlinks[i]);
      File txt = new File(f, "test.txt");
      FileInputStream fin = new FileInputStream(txt);
      BufferedReader reader = new BufferedReader(new InputStreamReader(fin));
      String str = reader.readLine();
      reader.close();
      out.writeBytes(str);
      out.writeBytes("\n");
    }
    out.close();
  } catch (IOException ie) {
    System.out.println(StringUtils.stringifyException(ie));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source: MRCaching.java

Example 11: getPipeCommand

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
String getPipeCommand(JobConf job) {
  String str = job.get("stream.reduce.streamprocessor");
  if (str == null) {
    return str;
  }
  try {
    return URLDecoder.decode(str, "UTF-8");
  } catch (UnsupportedEncodingException e) {
    System.err.println("stream.reduce.streamprocessor in jobconf not found");
    return null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: PipeReducer.java
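
Examples 7, 9, and 11 all pass the value returned by JobConf.get through URLDecoder, which implies the command was stored URL-encoded in the job configuration. The snippet below is a hypothetical illustration of that round trip; the setter call and the "/bin/cat -n" command are placeholders, not Hadoop Streaming's actual job-submission code.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import org.apache.hadoop.mapred.JobConf;

public class StreamProcessorRoundTrip {
  public static void main(String[] args) throws UnsupportedEncodingException {
    JobConf job = new JobConf();
    // Hypothetical setup: store a command URL-encoded, as the getters above expect.
    job.set("stream.map.streamprocessor", URLEncoder.encode("/bin/cat -n", "UTF-8"));
    String str = job.get("stream.map.streamprocessor");
    String command = (str == null) ? null : URLDecoder.decode(str, "UTF-8");
    System.out.println(command); // prints "/bin/cat -n"
  }
}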

Example 12: addInputPath

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
/**
 * Add a {@link Path} with a custom {@link InputFormat} to the list of
 * inputs for the map-reduce job.
 * 
 * @param conf The configuration of the job
 * @param path {@link Path} to be added to the list of inputs for the job
 * @param inputFormatClass {@link InputFormat} class to use for this path
 */
public static void addInputPath(JobConf conf, Path path,
    Class<? extends InputFormat> inputFormatClass) {

  String inputFormatMapping = path.toString() + ";"
     + inputFormatClass.getName();
  String inputFormats = conf.get("mapreduce.input.multipleinputs.dir.formats");
  conf.set("mapreduce.input.multipleinputs.dir.formats",
     inputFormats == null ? inputFormatMapping : inputFormats + ","
         + inputFormatMapping);

  conf.setInputFormat(DelegatingInputFormat.class);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: MultipleInputs.java
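
As a complement to Example 12, the sketch below shows how a job driver might call addInputPath twice and then read back the accumulated mapping with JobConf.get. The paths are placeholders, and the property key is the one used by Example 12's version of the class.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.MultipleInputs;

public class MultipleInputsSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // Placeholder paths; each call appends "path;formatClass" to the same property.
    MultipleInputs.addInputPath(conf, new Path("/data/a"), TextInputFormat.class);
    MultipleInputs.addInputPath(conf, new Path("/data/b"), SequenceFileInputFormat.class);
    // Read back the comma-separated mapping that Example 12 builds up.
    String formats = conf.get("mapreduce.input.multipleinputs.dir.formats");
    System.out.println(formats);
  }
}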

Example 13: configure

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public void configure(JobConf conf) {
  this.conf = conf;

  // this is tightly tied to map reduce
  // since it does not expose an api 
  // to get the partition
  partId = conf.getInt(MRJobConfig.TASK_PARTITION, -1);
  // create a file name using the partition
  // we need to write to this directory
  tmpOutputDir = FileOutputFormat.getWorkOutputPath(conf);
  blockSize = conf.getLong(HAR_BLOCKSIZE_LABEL, blockSize);
  // get the output path and write to the tmp 
  // directory 
  partname = "part-" + partId;
  tmpOutput = new Path(tmpOutputDir, partname);
  rootPath = (conf.get(SRC_PARENT_LABEL, null) == null) ? null :
              new Path(conf.get(SRC_PARENT_LABEL));
  if (rootPath == null) {
    throw new RuntimeException(
        "Unable to read parent path for har from config");
  }
  try {
    destFs = tmpOutput.getFileSystem(conf);
    //this was a stale copy
    if (destFs.exists(tmpOutput)) {
      destFs.delete(tmpOutput, false);
    } 
    partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096), 
        destFs.getDefaultReplication(tmpOutput), blockSize);
  } catch(IOException ie) {
    throw new RuntimeException("Unable to open output file " + tmpOutput, ie);
  }
  buffer = new byte[buf_size];
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: HadoopArchives.java

Example 14: getSplits

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
/**
 * Produce splits such that each is no greater than the quotient of the
 * total size and the number of splits requested.
 * @param job The handle to the JobConf object
 * @param numSplits Number of splits requested
 */
public InputSplit[] getSplits(JobConf job, int numSplits
    ) throws IOException {
  final int srcCount = job.getInt(OP_COUNT_LABEL, -1);
  final int targetcount = srcCount / numSplits;
  String srclist = job.get(OP_LIST_LABEL, "");
  if (srcCount < 0 || "".equals(srclist)) {
    throw new RuntimeException("Invalid metadata: #files(" + srcCount +
                               ") listuri(" + srclist + ")");
  }
  Path srcs = new Path(srclist);
  FileSystem fs = srcs.getFileSystem(job);

  List<FileSplit> splits = new ArrayList<FileSplit>(numSplits);

  Text key = new Text();
  FileOperation value = new FileOperation();
  long prev = 0L;
  int count = 0; //count src
  try (SequenceFile.Reader in = new SequenceFile.Reader(fs, srcs, job)) {
    for ( ; in.next(key, value); ) {
      long curr = in.getPosition();
      long delta = curr - prev;
      if (++count > targetcount) {
        count = 0;
        splits.add(new FileSplit(srcs, prev, delta, (String[])null));
        prev = curr;
      }
    }
  }
  long remaining = fs.getFileStatus(srcs).getLen() - prev;
  if (remaining != 0) {
    splits.add(new FileSplit(srcs, prev, remaining, (String[])null));
  }
  LOG.info("numSplits="  + numSplits + ", splits.size()=" + splits.size());
  return splits.toArray(new FileSplit[splits.size()]);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: DistCh.java

Example 15: HiveReaderSetting

import org.apache.hadoop.mapred.JobConf; // import the dependent package/class for this method
public HiveReaderSetting( final FileSplit split, final JobConf job ){
  config = new Configuration();

  disableSkipBlock = job.getBoolean( "mds.disable.block.skip" , false );
  disableFilterPushdown = job.getBoolean( "mds.disable.filter.pushdown" , false );

  Set<String> pathNameSet= createPathSet( split.getPath() );
  List<ExprNodeGenericFuncDesc> filterExprs = new ArrayList<ExprNodeGenericFuncDesc>();
  String filterExprSerialized = job.get( TableScanDesc.FILTER_EXPR_CONF_STR );
  if( filterExprSerialized != null ){
    filterExprs.add( SerializationUtilities.deserializeExpression(filterExprSerialized) );
  }

  MapWork mapWork;
  try{
    mapWork = Utilities.getMapWork(job);
  }catch( Exception e ){
    mapWork = null;
  }

  if( mapWork == null ){
    node = createExpressionNode( filterExprs );
    isVectorModeFlag = false;
    return;
  }

  node = createExpressionNode( filterExprs );

  for( Map.Entry<String,PartitionDesc> pathsAndParts: mapWork.getPathToPartitionInfo().entrySet() ){
    if( ! pathNameSet.contains( pathsAndParts.getKey() ) ){
      continue;
    }
    Properties props = pathsAndParts.getValue().getTableDesc().getProperties();
    if( props.containsKey( "mds.expand" ) ){
      config.set( "spread.reader.expand.column" , props.getProperty( "mds.expand" ) );
    }
    if( props.containsKey( "mds.flatten" ) ){
      config.set( "spread.reader.flatten.column" , props.getProperty( "mds.flatten" ) );
    }
  }

  config.set( "spread.reader.read.column.names" , createReadColumnNames( job.get( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , null ) ) );

  // Next Hive version:
  // Utilities.getUseVectorizedInputFileFormat(job)
  isVectorModeFlag = Utilities.isVectorMode( job );
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines of code: 48, Source: HiveReaderSetting.java


Note: The org.apache.hadoop.mapred.JobConf.get examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.