

Java Context.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Mapper.Context.write. If you are wondering what Context.write does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usages of the enclosing class, org.apache.hadoop.mapreduce.Mapper.Context.


Fifteen Context.write code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Java examples.
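Before diving into the examples, here is a minimal, self-contained sketch of where Context.write fits in a Hadoop Mapper. The class name WordTokenMapper and the tokenizing logic are illustrative assumptions, not taken from any project below; only the Mapper.map(KEYIN, VALUEIN, Context) signature and Context.write(KEYOUT, VALUEOUT) are standard Hadoop API. Note that the key and value passed to context.write must match the Mapper's declared output types, or the job fails at runtime.

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Minimal sketch (hypothetical class): each map() call may emit zero or
// more (key, value) pairs to the framework through context.write().
public class WordTokenMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            context.write(word, ONE); // types match Mapper<..., Text, IntWritable>
        }
    }
}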

Example 1: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
public void map(NullWritable key, NullWritable value, Context context) throws IOException, InterruptedException {

  int counter = 0;

  System.out.println("starting mapper");
  for (int i = 0; i < numberOfRecords; i++) {
    // r, numberOfRecords, runID, taskId, hKey, kv, and columnFamily are fields of the enclosing mapper
    String keyRoot = StringUtils.leftPad(Integer.toString(r.nextInt(Short.MAX_VALUE)), 5, '0');

    if (i % 1000 == 0) {
      System.out.print("."); // progress marker every 1000 records
    }

    for (int j = 0; j < 10; j++) {
      hKey.set(Bytes.toBytes(keyRoot + "|" + runID + "|" + taskId));
      kv = new KeyValue(hKey.get(), columnFamily, Bytes.toBytes("C" + j), Bytes.toBytes("counter:" + counter++));
      context.write(hKey, kv); // one HBase KeyValue per column C0..C9
    }
  }

  System.out.println("finished mapper");
}
 
Author: tmalaska, Project: HBase-ToHDFS, Lines: 24, Source: PopulateTable.java
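Example 1's mapper emits (ImmutableBytesWritable, KeyValue) pairs, the shape HBase's bulk-load output format expects. Below is a hedged sketch of how such a mapper might be wired into a driver; the class, table, and path names are illustrative assumptions, not taken from PopulateTable.java, and the HBase 0.94-era HFileOutputFormat/HTable API is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical driver for a bulk-load job; all names are illustrative.
public class PopulateTableDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "populate-table");
        job.setJarByClass(PopulateTableDriver.class);
        job.setMapperClass(PopulateTableMapper.class);          // the mapper from Example 1 (name assumed)
        job.setMapOutputKeyClass(ImmutableBytesWritable.class); // matches context.write(hKey, kv)
        job.setMapOutputValueClass(KeyValue.class);
        // configureIncrementalLoad sets up the partitioner and reducer for HFile output
        HFileOutputFormat.configureIncrementalLoad(job, new HTable(conf, "my_table"));
        FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}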

Example 2: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

  // normalize: lower-case, strip non-letters, split on whitespace
  String line = value.toString().trim().toLowerCase();
  line = line.replaceAll("[^a-z]+", " ");
  String[] words = line.split("\\s+");

  if (words.length < 2) {
    return;
  }

  // emit every n-gram of length 1..noGram starting at each position
  StringBuilder sb;
  for (int i = 0; i < words.length - 1; i++) {
    sb = new StringBuilder();
    for (int j = 0; i + j < words.length && j < noGram; j++) {
      sb.append(" ");
      sb.append(words[i + j]);
      context.write(new Text(sb.toString().trim()), new IntWritable(1));
    }
  }
}
 
Author: yogykwan, Project: mapreduce-samples, Lines: 23, Source: NGramLibraryBuilder.java
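Example 2 only emits (n-gram, 1) pairs; the counting is normally finished by a summing reducer. A minimal companion sketch follows, assuming a class name (NGramSumReducer) that is not from the project; note that Reducer.Context exposes the same write() method.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical reducer: sums the 1s emitted for each n-gram in Example 2.
public class NGramSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private final IntWritable total = new IntWritable();

    @Override
    public void reduce(Text ngram, Iterable<IntWritable> counts, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable c : counts) {
            sum += c.get();
        }
        total.set(sum);
        context.write(ngram, total); // final (n-gram, count) pair
    }
}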

Example 3: setup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    overrideRdfContext = conf.getBoolean(OVERRIDE_CONTEXT_PROPERTY, false);
    String defCtx = conf.get(DEFAULT_CONTEXT_PROPERTY);
    defaultRdfContext = defCtx == null ? null : SimpleValueFactory.getInstance().createIRI(defCtx);
    decimationFactor = conf.getInt(DECIMATION_FACTOR_PROPERTY, DEFAULT_DECIMATION_FACTOR);
    for (byte b = 1; b < 6; b++) {
        context.write(new ImmutableBytesWritable(new byte[] {b}), new LongWritable(1));
    }
    timestamp = conf.getLong(DEFAULT_TIMESTAMP_PROPERTY, System.currentTimeMillis());
}
 
Author: Merck, Project: Halyard, Lines: 13, Source: HalyardPreSplit.java

Example 4: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
  // input lines come from the ratings file and are '^'-delimited
  String[] mydata = value.toString().split("\\^");

  if (mydata.length == 3) {
    if (mydata[1].contains("Palo Alto")) {
      // emit (business id, 1) for businesses located in Palo Alto
      context.write(new Text(mydata[0].trim()), new IntWritable(1));
    }
  }
}
 
Author: BhargaviRavula, Project: Bigdata, Lines: 22, Source: CountYelpReview.java

Example 5: cleanup

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  super.cleanup(context);
  // sort the buffered businesses and emit only the top 10 by rating
  Collections.sort(myarray, new MyMovieComparator());
  int count = 0;
  for (MyBusinessData data : myarray) {
    result.set("" + data.rating);
    myKey.set(data.businessId);
    context.write(myKey, result); // emit a <businessId, rating> pair
    count++;
    if (count >= 10) break;
  }
}
 
Author: BhargaviRavula, Project: Bigdata, Lines: 18, Source: Top10BusRev.java

Example 6: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void map(Map<String, ByteBuffer> keys, Map<String, ByteBuffer> columns, Context context) throws IOException, InterruptedException
{
    for (Entry<String, ByteBuffer> column : columns.entrySet())
    {
        if (!"line".equalsIgnoreCase(column.getKey()))
            continue;

        String value = ByteBufferUtil.string(column.getValue());

        StringTokenizer itr = new StringTokenizer(value);
        while (itr.hasMoreTokens())
        {
            word.set(itr.nextToken());
            context.write(word, one);
        }
    }
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 18, Source: WordCount.java

Example 7: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
  if (first) {
    FileSplit split = (FileSplit) context.getInputSplit();
    Path path = split.getPath(); // current split path
    lvalue.set(path.getName());
    lkey.set(key.get());
    context.write(lkey, lvalue);

    first = false;
  }

  String line = value.toString();
  if (!line.isEmpty()) {
    Instance instance = converter.convert(line);
    double prediction = ruleBase.classify(instance);
    lkey.set(dataset.getLabel(instance));
    lvalue.set(Double.toString(prediction));
    context.write(lkey, lvalue);
  }
}
 
Author: saradelrio, Project: Chi-FRBCS-BigData-Ave, Lines: 22, Source: Chi_RWClassifier.java

Example 8: reduce

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
  String word = key.toString();
  double sumPosScore = 0;
  double sumNegScore = 0;

  // each value is "seedWord,score"; accumulate scores by seed-word polarity
  for (Text text : values) {
    String[] ar = text.toString().split(",");
    String w_seed = ar[0];
    if (SeedWordsHelper.isPositive(w_seed))
      sumPosScore += Double.parseDouble(ar[1]);
    else if (SeedWordsHelper.isNegative(w_seed))
      sumNegScore += Double.parseDouble(ar[1]);
  }
  context.write(new Text(word), new Text(sumPosScore + " " + sumNegScore));
}
 
Author: Avlessi, Project: SentimentAnalysis, Lines: 18, Source: PosNegScoreCalculationDriver.java

Example 9: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void map(ImmutableBytesWritable row, Result value, Context context) throws InterruptedException, IOException {
  String w_seed = new String(row.get());
  // emit (word, "seedWord,score") for every cell in the scanned row
  for (KeyValue kv : value.raw()) {
    String word = new String(kv.getQualifier());
    double score = Bytes.toDouble(kv.getValue());
    context.write(new Text(word), new Text(w_seed + "," + score));
  }
}
 
Author: Avlessi, Project: SentimentAnalysis, Lines: 11, Source: PosNegScoreCalculationDriver.java

Example 10: reduce

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void reduce(Text word, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
{
    int sum = 0;
    for (IntWritable val : values)
        sum += val.get();
    keys.put("word", ByteBufferUtil.bytes(word.toString()));
    context.write(keys, getBindVariables(word, sum));
}
 
Author: Stratio, Project: stratio-cassandra, Lines: 9, Source: WordCount.java

Example 11: map

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
public void map(BytesWritable key, Tuple value, Context context) throws IOException, InterruptedException {      
  System.out.println("key = " + key);
  System.out.println("value = " + value);
  
  context.write(key, value);
}
 
Author: sigmoidanalytics, Project: spork-streaming, Lines: 8, Source: TestBasicTableUnion.java

Example 12: reduce

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
  for (Text val : values) {
    String str = val.toString();

    // delete the edge in both directions from the "weight" column family;
    // Bytes.toBytes(key.toString()) is used instead of key.getBytes(), because
    // Text.getBytes() returns a backing array that may be longer than the text
    Delete delRow1 = new Delete(Bytes.toBytes(key.toString()));
    delRow1.deleteColumn("weight".getBytes(), Bytes.toBytes(str));

    Delete delRow2 = new Delete(Bytes.toBytes(str));
    delRow2.deleteColumn("weight".getBytes(), Bytes.toBytes(key.toString()));

    context.write(null, delRow1);
    context.write(null, delRow2);
  }
}
 
Author: Avlessi, Project: SentimentAnalysis, Lines: 18, Source: RemoveEdgesDriver.java

Example 13: reduce

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public void reduce(Text key, Iterable<DoubleWritable> values, Context context) throws IOException, InterruptedException {
  // sum the partial cosine-similarity contributions for this word pair
  double cos_sim = 0;
  for (DoubleWritable d : values) {
    cos_sim += d.get();
  }

  // the key is "w_u,w_v"; store the weight symmetrically for both words
  String[] str = key.toString().split(",");
  String w_u = str[0];
  String w_v = str[1];

  // (a threshold check, cos_sim > theta, is left disabled in the original)
  Put put1 = new Put(Bytes.toBytes(w_u));
  put1.add("weight".getBytes(), w_v.getBytes(), Bytes.toBytes(cos_sim));
  Put put2 = new Put(Bytes.toBytes(w_v));
  put2.add("weight".getBytes(), w_u.getBytes(), Bytes.toBytes(cos_sim));

  context.write(null, put1);
  context.write(null, put2);
}
 
Author: Avlessi, Project: SentimentAnalysis, Lines: 30, Source: CosineSimilarityDriver.java

Example 14: collect

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
@Override
public void collect(Context oc, Tuple tuple) 
        throws InterruptedException, IOException {
    
    Byte index = (Byte)tuple.get(0);
    PigNullableWritable key =
        HDataType.getWritableComparableTypes(tuple.get(1), keyType);
    NullableTuple val = new NullableTuple((Tuple)tuple.get(2));
    
    // Both the key and the value need the index.  The key needs it so
    // that it can be sorted on the index in addition to the key
    // value.  The value needs it so that POPackage can properly
    // assign the tuple to its slot in the projection.
    key.setIndex(index);
    val.setIndex(index);

    oc.write(key, val);
}
 
Author: PonIC, Project: PonIC, Lines: 19, Source: PigGenericMapReduce.java

Example 15: processOnePackageOutput

import org.apache.hadoop.mapreduce.Mapper.Context; // import the class the method depends on
public boolean processOnePackageOutput(Context oc) throws IOException, InterruptedException {

    Result res = pack.getNext(DUMMYTUPLE);
    if (res.returnStatus == POStatus.STATUS_OK) {
        Tuple packRes = (Tuple) res.result;

        if (rp.isEmpty()) {
            oc.write(null, packRes);
            return false;
        }
        for (int i = 0; i < roots.length; i++) {
            roots[i].attachInput(packRes);
        }
        runPipeline(leaf);
    }

    if (res.returnStatus == POStatus.STATUS_NULL) {
        return false;
    }

    if (res.returnStatus == POStatus.STATUS_ERR) {
        int errCode = 2093;
        String msg = "Encountered error in package operator while processing group.";
        throw new ExecException(msg, errCode, PigException.BUG);
    }

    if (res.returnStatus == POStatus.STATUS_EOP) {
        return true;
    }

    return false;
}
 
Author: PonIC, Project: PonIC, Lines: 36, Source: PigGenericMapReduce.java


Note: The org.apache.hadoop.mapreduce.Mapper.Context.write examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.