

Java LineRecordReader.initialize Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.lib.input.LineRecordReader.initialize. If you are wondering what LineRecordReader.initialize does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.lib.input.LineRecordReader.


The sections below present 8 code examples of the LineRecordReader.initialize method, ordered by popularity by default.
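
Before the individual examples, here is a minimal, self-contained sketch of the pattern all eight of them share: a custom RecordReader that delegates split positioning and line reading to LineRecordReader by calling its initialize method with the split and task context. The class name LineWrappingRecordReader is ours, for illustration only; it does not come from any of the projects below.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

// Hypothetical wrapper illustrating the delegate-to-LineRecordReader pattern.
public class LineWrappingRecordReader extends RecordReader<LongWritable, Text> {

  private final LineRecordReader lineReader = new LineRecordReader();

  @Override
  public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
    // LineRecordReader seeks to the split start and transparently skips the
    // partial first line of any split that does not begin at byte 0.
    lineReader.initialize(split, context);
  }

  @Override
  public boolean nextKeyValue() throws IOException {
    return lineReader.nextKeyValue();
  }

  @Override
  public LongWritable getCurrentKey() {
    return lineReader.getCurrentKey(); // byte offset of the current line
  }

  @Override
  public Text getCurrentValue() {
    return lineReader.getCurrentValue(); // the line itself
  }

  @Override
  public float getProgress() throws IOException {
    return lineReader.getProgress();
  }

  @Override
  public void close() throws IOException {
    lineReader.close();
  }
}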

Example 1: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException
{
  key = new Text();
  value = new MapWritable();
  jsonParser = new JSONParser();

  lineReader = new LineRecordReader();
  lineReader.initialize(inputSplit, context);

  queryString = context.getConfiguration().get("query", "?q=*");

  // Load the data schemas
  FileSystem fs = FileSystem.get(context.getConfiguration());
  try
  {
    SystemConfiguration.setProperty("data.schemas", context.getConfiguration().get("data.schemas"));
    DataSchemaLoader.initialize(true, fs);
  } catch (Exception e)
  {
    e.printStackTrace();
  }
  String dataSchemaName = context.getConfiguration().get("dataSchemaName");
  dataSchema = DataSchemaRegistry.get(dataSchemaName);
}
 
Developer ID: apache, Project: incubator-pirk, Lines of code: 26, Source: JSONRecordReader.java
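
The reader above pulls three settings from the task configuration. A hedged sketch of the driver-side setup that would satisfy it follows; the configuration keys are exactly the ones read in initialize(), while the values and the job name are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical driver-side configuration; the keys mirror the
// context.getConfiguration().get(...) calls in the reader above.
Configuration conf = new Configuration();
conf.set("query", "?q=*");                    // the reader falls back to this default anyway
conf.set("data.schemas", "/path/to/schemas"); // hypothetical schema location
conf.set("dataSchemaName", "mySchema");       // hypothetical schema name
Job job = Job.getInstance(conf, "json-read"); // hypothetical job name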

Example 2: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
/**
 * Called once at initialization to initialize the RecordReader.
 *
 * @param genericSplit the split that defines the range of records to read.
 * @param context the information about the task.
 * @throws IOException on IO Error.
 */
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    try {
      LOG.debug("initialize('{}', '{}')",
          HadoopToStringUtil.toString(genericSplit), HadoopToStringUtil.toString(context));
    } catch (InterruptedException ie) {
      LOG.debug("InterruptedException during HadoopToStringUtil.toString", ie);
    }
  }
  Preconditions.checkArgument(genericSplit instanceof FileSplit,
      "InputSplit genericSplit should be an instance of FileSplit.");
  // Get FileSplit.
  FileSplit fileSplit = (FileSplit) genericSplit;
  // Create the JsonParser.
  jsonParser = new JsonParser();
  // Initialize the LineRecordReader.
  lineReader = new LineRecordReader();
  lineReader.initialize(fileSplit, context);
}
 
Developer ID: GoogleCloudPlatform, Project: bigdata-interop, Lines of code: 29, Source: GsonRecordReader.java
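
For context, a RecordReader like the one above is handed to the framework by an InputFormat; the MapReduce framework, not user code, calls initialize() with the split before the first nextKeyValue(). Below is a hypothetical minimal InputFormat (JsonLineInputFormat is our name, not part of bigdata-interop), reusing the sketch reader from the introduction.

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class JsonLineInputFormat extends FileInputFormat<LongWritable, Text> {
  @Override
  public RecordReader<LongWritable, Text> createRecordReader(InputSplit split,
      TaskAttemptContext context) {
    // Return the reader unstarted; the framework calls initialize() on it
    // with the split before the first nextKeyValue().
    return new LineWrappingRecordReader();
  }
}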

Example 3: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
  throws IOException, InterruptedException {
  lineRecordReader = new LineRecordReader();
  lineRecordReader.initialize(inputSplit, taskAttemptContext);
  currentKey = new ImmutableBytesWritable();
  parser = new JSONParser();
  skipBadLines = taskAttemptContext.getConfiguration().getBoolean(
    SKIP_LINES_CONF_KEY, true);
}
 
Developer ID: lhfei, Project: hbase-in-action, Lines of code: 11, Source: BulkImportJobExample.java
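
A hedged reconstruction (ours, not the hbase-in-action source) of how the skipBadLines flag set in initialize() is typically honored in nextKeyValue(): malformed lines are silently skipped when the flag is true and rethrown as IOException otherwise. The lineRecordReader, parser, currentKey, and skipBadLines fields come from the example above; currentValue is an assumed field.

import org.json.simple.JSONObject;
import org.json.simple.parser.ParseException;

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  while (lineRecordReader.nextKeyValue()) {
    String line = lineRecordReader.getCurrentValue().toString();
    try {
      currentValue = (JSONObject) parser.parse(line); // assumed field
      // Use the line's byte offset as a stand-in row key (assumption).
      currentKey.set(lineRecordReader.getCurrentKey().toString().getBytes());
      return true;
    } catch (ParseException e) {
      if (!skipBadLines) {
        throw new IOException("Cannot parse JSON line: " + line, e);
      }
      // Bad line and skipBadLines is true: fall through to the next line.
    }
  }
  return false;
}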

Example 4: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
@Override
@SuppressWarnings("squid:S2095") // recordReader is closed explicitly in the close() method
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
    InterruptedException
{
  if (split instanceof FileSplit)
  {
    FileSplit fsplit = (FileSplit) split;
    delimitedParser = getDelimitedParser(fsplit.getPath().toString(),
        context.getConfiguration());
    recordReader = new LineRecordReader();
    recordReader.initialize(fsplit, context);
    // Skip the first line if the parser is configured to do so
    if (delimitedParser.getSkipFirstLine())
    {
      // Only skip the first line of the first split. The other
      // splits are somewhere in the middle of the original file,
      // so their first lines should not be skipped.
      if (fsplit.getStart() == 0)
      {
        nextKeyValue();
      }
    }
  }
  else
  {
    throw new IOException("input split is not a FileSplit");
  }
}
 
Developer ID: ngageoint, Project: mrgeo, Lines of code: 30, Source: DelimitedVectorRecordReader.java
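
The header rule above generalizes beyond mrgeo: LineRecordReader already discards the first (partial) line of every split whose start offset is non-zero, so an explicit header skip is only ever needed in the split that begins at byte 0. A hedged generic sketch of the same rule (the reader and hasHeader fields are our assumptions):

import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

// Inside initialize(); 'reader' is a LineRecordReader field and
// 'hasHeader' is read from the job configuration (both assumptions).
FileSplit fileSplit = (FileSplit) split;
reader = new LineRecordReader();
reader.initialize(fileSplit, context);
if (hasHeader && fileSplit.getStart() == 0) {
  // Consume the header row so it is never emitted as a record; splits
  // that start mid-file never contain the header, so nothing else to do.
  reader.nextKeyValue();
}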

Example 5: initializeNextReader

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
private void initializeNextReader() throws IOException {
  rdr = new LineRecordReader();
  rdr.initialize(
      new FileSplit(split.getPath(currentSplit),
          split.getOffset(currentSplit),
          split.getLength(currentSplit), null),
      context);
  ++currentSplit;
}
 
Developer ID: Pivotal-Field-Engineering, Project: pmr-common, Lines of code: 12, Source: CombineTextInputFormat.java
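
A hedged sketch (our reconstruction, not the pmr-common source) of how initializeNextReader() is typically driven: nextKeyValue() drains the current file's reader, then advances to the next path of the CombineFileSplit until all paths are exhausted. The rdr, split, and currentSplit fields follow the example above; note that initializeNextReader() increments currentSplit after opening a reader, so the bounds check below compares against getNumPaths() directly.

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  while (!rdr.nextKeyValue()) {
    // Current file exhausted; stop when no paths remain in the split.
    if (currentSplit >= split.getNumPaths()) {
      return false;
    }
    rdr.close();
    initializeNextReader();
  }
  return true;
}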

Example 6: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
		throws IOException, InterruptedException {

	rdr = new LineRecordReader();
	rdr.initialize(split, context);
}
 
Developer ID: Pivotal-Field-Engineering, Project: pmr-common, Lines of code: 8, Source: JsonInputFormat.java

Example 7: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext attempt)
    throws IOException, InterruptedException {
  lineReader = new LineRecordReader();
  lineReader.initialize(inputSplit, attempt);
}
 
Developer ID: willddy, Project: bigdata_pattern, Lines of code: 8, Source: LogFileRecordReader.java

Example 8: initialize

import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; // import the package/class this method depends on
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException
{
	lineReader = new LineRecordReader();
	lineReader.initialize(genericSplit, context);

	split = (FileSplit)genericSplit;
	value = null;
}
 
Developer ID: ilveroluca, Project: seal, Lines of code: 9, Source: SamInputFormat.java


Note: The org.apache.hadoop.mapreduce.lib.input.LineRecordReader.initialize method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Before distributing or using the code, please consult the corresponding project's License; do not reproduce this article without permission.