

Java Context Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.Mapper.Context. If you are wondering what the Context class is for, how to use it, or what real code that uses it looks like, the hand-picked examples below should help.


Context is a nested class of org.apache.hadoop.mapreduce.Mapper. Fifteen code examples of the Context class are shown below, sorted by popularity.
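Before the collected examples, here is a minimal self-contained sketch (my own illustration, not taken from any of the projects below) of the two most common uses of Mapper.Context: reading the job Configuration in setup() and emitting key/value pairs with context.write(). The property name wordcount.lowercase is hypothetical.

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();
  private boolean toLowerCase;

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    // Context gives every task access to the job Configuration.
    Configuration conf = context.getConfiguration();
    toLowerCase = conf.getBoolean("wordcount.lowercase", true); // hypothetical property
  }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    String line = toLowerCase ? value.toString().toLowerCase() : value.toString();
    StringTokenizer tokenizer = new StringTokenizer(line);
    while (tokenizer.hasMoreTokens()) {
      word.set(tokenizer.nextToken());
      // Context.write emits an intermediate <word, 1> pair to the shuffle.
      context.write(word, ONE);
    }
  }
}

The same Context object also exposes counters via context.getCounter() and task metadata such as context.getTaskAttemptID(), which several of the examples below rely on.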

Example 1: getSentiFile

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
private void getSentiFile(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  String swnPath = conf.get("sentwordnetfile");
  System.out.println("@@@ Path: " + swnPath);
  // Load the SentiWordNet file from HDFS into memory, line by line.
  this.linhas = new ArrayList<String>();
  try {
    Path pt = new Path(swnPath);
    FileSystem fs = FileSystem.get(new Configuration());
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)));
    String line = br.readLine();
    while (line != null) {
      linhas.add(line);
      line = br.readLine();
    }
  } catch (Exception e) {
    System.out.println("@@@@ ERRO: " + e.getMessage());
    throw new IOException(e);
  }
  sdc = new SentiWordNetDemoCode(linhas);
}
 
Author: cleuton, Project: bigdatasample, Lines: 22, Source: TokenizerMapper.java

Example 2: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
public void map(NullWritable key, NullWritable value, Context context) throws IOException, InterruptedException {

  int counter = 0;
  
  System.out.println("starting mapper");
  System.out.println();
  for (int i = 0; i < numberOfRecords; i++) {
    String keyRoot = StringUtils.leftPad(Integer.toString(r.nextInt(Short.MAX_VALUE)), 5, '0');

    if (i % 1000 == 0) {
      System.out.print(".");
    }

    for (int j = 0; j < 10; j++) {
      hKey.set(Bytes.toBytes(keyRoot + "|" + runID + "|" + taskId));
      kv = new KeyValue(hKey.get(), columnFamily, Bytes.toBytes("C" + j), Bytes.toBytes("counter:" + counter++ ));
      context.write(hKey, kv);
    }
  }

  System.out.println("finished mapper");
}
 
Author: tmalaska, Project: HBase-ToHDFS, Lines: 24, Source: PopulateTable.java

Example 3: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
public void map(LongWritable key, Writable value, Context context)
  throws IOException, InterruptedException {
  try {
    String str = value.toString();
    if (value instanceof Text) {
      writer.write(str, 0, str.length());
      writer.newLine();
    } else if (value instanceof SqoopRecord) {
      writer.write(str, 0, str.length());
    }
  } catch (Exception e) {
    doExecuteUpdate("DROP TABLE " + tmpTableName);
    cleanup(context);
    throw new IOException(e);
  }
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: PGBulkloadExportMapper.java

Example 4: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
public void map(LongWritable key, Writable value, Context context)
  throws IOException, InterruptedException {
  line.setLength(0);
  line.append(value.toString());
  if (value instanceof Text) {
    line.append(System.getProperty("line.separator"));
  }
  try {
    byte[] data = line.toString().getBytes("UTF-8");
    copyin.writeToCopy(data, 0, data.length);
  } catch (SQLException ex) {
    LoggingUtils.logAll(LOG, "Unable to execute copy", ex);
    close();
    throw new IOException(ex);
  }
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 18, Source: PostgreSQLCopyExportMapper.java

Example 5: setup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);

  Configuration conf = context.getConfiguration();

  // Instantiate a copy of the user's class to hold and parse the record.
  String recordClassName = conf.get(
      ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY);
  if (null == recordClassName) {
    throw new IOException("Export table class name ("
        + ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY
        + ") is not set!");
  }

  try {
    Class cls = Class.forName(recordClassName, true,
        Thread.currentThread().getContextClassLoader());
    recordImpl = (SqoopRecord) ReflectionUtils.newInstance(cls, conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  }

  if (null == recordImpl) {
    throw new IOException("Could not instantiate object of type "
        + recordClassName);
  }

  columnTypes = DefaultStringifier.load(conf, AVRO_COLUMN_TYPES_MAP,
      MapWritable.class);
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 32, Source: ParquetExportMapper.java

Example 6: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

  String line = value.toString();

  line = line.trim().toLowerCase();
  line = line.replaceAll("[^a-z]+", " ");
  String[] words = line.split("\\s+"); // split by ' ', '\t', '\n', etc.

  if (words.length < 2) {
    return;
  }

  StringBuilder sb;
  for (int i = 0; i < words.length - 1; i++) {
    sb = new StringBuilder();
    for (int j = 0; i + j < words.length && j < noGram; j++) {
      sb.append(" ");
      sb.append(words[i + j]);
      context.write(new Text(sb.toString().trim()), new IntWritable(1));
    }
  }
}
 
Author: yogykwan, Project: mapreduce-samples, Lines: 23, Source: NGramLibraryBuilder.java

Example 7: setup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
public void setup(Context context) throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();

  multipleOutputs = new MultipleOutputs(context);
  lowerBoundary = conf.get("LOWER_DATE");
  upperBoundary = conf.get("HIGHER_DATE");
}
 
Author: gatripat, Project: InsAdjustment, Lines: 9, Source: CSVparserMapper.java

Example 8: JobHistoryFileReplayHelper

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
JobHistoryFileReplayHelper(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  int taskId = context.getTaskAttemptID().getTaskID().getId();
  int size = conf.getInt(MRJobConfig.NUM_MAPS,
      TimelineServicePerformance.NUM_MAPS_DEFAULT);
  replayMode = conf.getInt(JobHistoryFileReplayHelper.REPLAY_MODE,
          JobHistoryFileReplayHelper.REPLAY_MODE_DEFAULT);
  String processingDir =
      conf.get(JobHistoryFileReplayHelper.PROCESSING_PATH);

  Path processingPath = new Path(processingDir);
  FileSystem processingFs = processingPath.getFileSystem(conf);
  parser = new JobHistoryFileParser(processingFs);
  jobFiles = selectJobFiles(processingFs, processingPath, taskId, size);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: JobHistoryFileReplayHelper.java

Example 9: testLoadMapper

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@SuppressWarnings({"rawtypes", "unchecked"})
@Test (timeout=10000)
public void testLoadMapper() throws Exception {

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);

  TaskAttemptID taskId = new TaskAttemptID();
  RecordReader<NullWritable, GridmixRecord> reader = new FakeRecordReader();

  LoadRecordGkGrWriter writer = new LoadRecordGkGrWriter();

  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
  LoadSplit split = getLoadSplit();

  MapContext<NullWritable, GridmixRecord, GridmixKey, GridmixRecord> mapContext = new MapContextImpl<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>(
          conf, taskId, reader, writer, committer, reporter, split);
  // context
  Context ctx = new WrappedMapper<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>()
          .getMapContext(mapContext);

  reader.initialize(split, ctx);
  ctx.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(
          ctx.getConfiguration(), true);

  LoadJob.LoadMapper mapper = new LoadJob.LoadMapper();
  // setup, map, clean
  mapper.run(ctx);

  Map<GridmixKey, GridmixRecord> data = writer.getData();
  // check result
  assertEquals(2, data.size());

}
 
Author: yncxcw, Project: big-c, Lines: 40, Source: TestGridMixClasses.java

Example 10: setup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    overrideRdfContext = conf.getBoolean(OVERRIDE_CONTEXT_PROPERTY, false);
    String defCtx = conf.get(DEFAULT_CONTEXT_PROPERTY);
    defaultRdfContext = defCtx == null ? null : SimpleValueFactory.getInstance().createIRI(defCtx);
    decimationFactor = conf.getInt(DECIMATION_FACTOR_PROPERTY, DEFAULT_DECIMATION_FACTOR);
    for (byte b = 1; b < 6; b++) {
        context.write(new ImmutableBytesWritable(new byte[] {b}), new LongWritable(1));
    }
    timestamp = conf.getLong(DEFAULT_TIMESTAMP_PROPERTY, System.currentTimeMillis());
}
 
Author: Merck, Project: Halyard, Lines: 13, Source: HalyardPreSplit.java

Example 11: setup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
protected void setup(Context context)
    throws IOException, InterruptedException {
  super.setup(context);
  // Read the business data file into memory on the mapper.
  myMap = new HashMap<String, String>();
  Configuration conf = context.getConfiguration();
  String mybusinessdataPath = conf.get("businessdata");
  // e.g. /user/hue/input/
  Path part = new Path("hdfs://cshadoop1" + mybusinessdataPath); // location of the file in HDFS

  FileSystem fs = FileSystem.get(conf);
  FileStatus[] fss = fs.listStatus(part);
  for (FileStatus status : fss) {
    Path pt = status.getPath();

    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)));
    String line = br.readLine();
    while (line != null) {
      String[] arr = line.split("\\^");
      if (arr.length == 3) {
        myMap.put(arr[0].trim(), line); // business id and the remaining data columns
      }
      line = br.readLine();
    }
  }
}
 
Author: BhargaviRavula, Project: Bigdata, Lines: 36, Source: UserRatedStanford.java

Example 12: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
  // from ratings

  String[] mydata = value.toString().split("\\^");

  if (mydata.length == 3) {
    if (mydata[1].contains("Palo Alto")) {
      context.write(new Text(mydata[0].trim()), new IntWritable(1));
    }

    /*if("review".compareTo(mydata[mydata.length-2].trim())== 0){

      context.write(new Text(mydata[mydata.length-2].trim()),new IntWritable(1));
    }
    if("user".compareTo(mydata[mydata.length-2].trim())== 0){

      context.write(new Text(mydata[mydata.length-2].trim()),new IntWritable(1));
    }*/
  }
}
 
Author: BhargaviRavula, Project: Bigdata, Lines: 22, Source: CountYelpReview.java

Example 13: cleanup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
protected void cleanup(Context context)
    throws IOException, InterruptedException {
  super.cleanup(context);
  Collections.sort(myarray, new MyMovieComparator());
  int count = 0;
  for (MyBusinessData data : myarray) {
    result.set("" + data.rating);
    myKey.set(data.businessId);
    context.write(myKey, result); // emit a <businessId, rating> pair
    count++;
    if (count >= 10) break; // keep only the top 10
  }
}
 
Author: BhargaviRavula, Project: Bigdata, Lines: 18, Source: Top10BusRev.java

Example 14: startAligner

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
@Override
protected void startAligner(Mapper.Context context) throws IOException, InterruptedException {
    // make command
    String customArgs = HalvadeConf.getCustomArgs(context.getConfiguration(), "bwa", "mem");
    String[] command = CommandGenerator.bwaMem(bin, ref, null, null, isPaired, true, threads, customArgs);
    pbw = new ProcessBuilderWrapper(command, bin);
    // run command
    // needs to be streamed to output otherwise the process blocks ...
    pbw.startProcess(null, System.err);
    // check if alive.
    if(!pbw.isAlive())
        throw new ProcessException("BWA mem", pbw.getExitState());
    pbw.getSTDINWriter();
    // make a SAMstream handler
    ssh = new SAMStreamHandler(instance, context, false);
    ssh.start();
}
 
Author: biointec, Project: halvade, Lines: 18, Source: BWAMemInstance.java

Example 15: logEvent

import org.apache.hadoop.mapreduce.Mapper.Context; // import the required package/class
static public void logEvent(FileSystem fs, Path path, LOGTYPES type,
    LOGRESULTS result, Codec codec, Context context, LogSample sample,
    String tag) {
  try {
    if (context == null) {
      incrRaidNodeMetricCounter(fs, type, result, tag);
    } else {
      incrLogMetricCounter(context, fs, type, result, tag);
    } 
    if (sample == null) sample = new LogSample();
    if (path != null) sample.addNormalValue(LOGKEYS.Path.name(), path.toString());
    if (codec != null) sample.addNormalValue(LOGKEYS.Code.name(), codec.id);
    sample.addNormalValue(LOGKEYS.Type.name(), type.name()); 
    sample.addNormalValue(LOGKEYS.Cluster.name(), fs.getUri().getAuthority());
    EVENTS_LOG.info(sample.toJSON());
  } catch (Exception e) {
    LOG.warn("Exception when logging the File_Fix_WaitTime metric : " +
        e.getMessage(), e);
  }
}
 
Author: rhli, Project: hadoop-EAR, Lines: 21, Source: LogUtils.java


Note: The org.apache.hadoop.mapreduce.Mapper.Context examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please refer to each project's license before distributing or reusing the code. Do not reproduce without permission.