

Java Context.write Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.mapreduce.Reducer.Context.write. If you are wondering what Context.write does, how to use it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.mapreduce.Reducer.Context.


Below are 15 code examples of the Context.write method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
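Before diving into the examples, here is a minimal, self-contained sketch of how Context.write is typically called from a Reducer. The class name SumReducer and its type parameters are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Minimal sketch: sums the integer values for each key and emits
// one (key, sum) pair per group via Context.write.
public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
  private final IntWritable result = new IntWritable();

  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable val : values) {
      sum += val.get();
    }
    result.set(sum);
    context.write(key, result); // one output record per key
  }
}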

Example 1: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void map(Object key, Text value,
    Context context) throws IOException, InterruptedException {

  // Each input line looks like "movie:xxx_rating,xxx_rating,..." (inferred
  // from the parsing below); 'one' is a class field, see the note after this example.
  String line = value.toString();
  int movieIndex = line.indexOf(":");
  if (movieIndex > 0) {
    String reviews = line.substring(movieIndex + 1);
    StringTokenizer token = new StringTokenizer(reviews, ",");
    while (token.hasMoreTokens()) {
      String tok = token.nextToken();
      int reviewIndex = tok.indexOf("_");
      String ratingStr = tok.substring(reviewIndex + 1);
      int rating = Integer.parseInt(ratingStr);
      // Emit (rating, 1) so the reducer can build a histogram of ratings.
      context.write(new IntWritable(rating), one);
    }
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 23, Source: HistogramRatings.java
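Note: the mapper above references a field named one that lies outside the excerpt; in this WordCount-style pattern it is presumably declared on the enclosing class like so (an assumption, not shown in HistogramRatings.java):

// Assumed class field (not shown in the excerpt above):
private final static IntWritable one = new IntWritable(1);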

Example 2: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void reduce(Text key, Iterable<Text> values, Context context)
    throws IOException, InterruptedException {
  // All records for the same word arrive in the same group.
  float sum = 0;
  List<String> vals = new ArrayList<String>();
  for (Text str : values) {
    int index = str.toString().lastIndexOf(" ");
    sum += Integer.parseInt(str.toString().substring(index + 1)); // number of documents containing this word
    vals.add(str.toString().substring(0, index)); // keep the record minus the count
  }
  double tmp = Math.log10(totalArticle * 1.0 / (sum * 1.0)); // IDF = log10(total documents / documents containing the word)
  for (int j = 0; j < vals.size(); j++) {
    String val = vals.get(j);
    String newsID = val.substring(0, val.indexOf(" "));
    String end = val.substring(val.lastIndexOf(" "));
    float f_end = Float.parseFloat(end); // read the TF value
    val += " ";
    val += f_end * tmp; // append the TF-IDF value
    context.write(new Text(newsID), new Text(key + " " + val.substring(val.indexOf(" ") + 1)));
  }
}
 
Developer: hejy12, Project: newsRecommender, Lines: 23, Source: TFIDF2.java
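The reducer above reads totalArticle (the total document count used in the IDF term) from outside the method. One plausible way to supply it, assuming a configuration key named totalArticle (a hypothetical name; the actual mechanism in TFIDF2.java is not shown here), is via setup():

// Hypothetical sketch: totalArticle is assumed to come from the job configuration.
private long totalArticle;

@Override
protected void setup(Context context) {
  totalArticle = context.getConfiguration().getLong("totalArticle", 1L);
}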

Example 3: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
public void map(ImmutableBytesWritable row, Result value, Context context)
    throws InterruptedException, IOException {
  Text resText = null;
  String prim_key = new String(row.get());
  for (KeyValue kv : value.raw()) {
    try {
      // Normalize the stored count by the row-key length.
      double norm = (double) Integer.parseInt(new String(kv.getValue())) / prim_key.length();
      resText = new Text(prim_key + "," + String.valueOf(norm));
      String qual = new String(kv.getQualifier());
      context.write(new Text(qual), resText);
    } catch (Exception e) {
      System.out.println("Exception in mapper for key = " + prim_key);
    }
  }
}
 
Developer: Avlessi, Project: SentimentAnalysis, Lines: 20, Source: WriteIndicesSetDriver.java

Example 4: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void map(VarLongWritable key, VectorWritable value, Context context)
    throws IOException, InterruptedException {
  long userID = key.get();
  Vector userVector = value.get();
  Iterator<Vector.Element> it = userVector.nonZeroes().iterator();
  IntWritable itemi = new IntWritable();
  while (it.hasNext()) {
    Vector.Element e = it.next();
    int itemIndex = e.index();
    float preferenceValue = (float) e.get();
    itemi.set(itemIndex);
    // Emit (itemIndex, (userID, preference)) so each item collects its raters.
    context.write(itemi, new VectorOrPrefWritable(userID, preferenceValue));
    System.out.println("item :" + itemi + ",userand val:" + userID + "," + preferenceValue);
  }
}
 
Developer: bytegriffin, Project: recsys-offline, Lines: 17, Source: Step32.java
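The Writable types in this example come from Apache Mahout; the excerpt presumably relies on imports along these lines:

// Assumed imports for the Mahout types used above:
import org.apache.mahout.math.VarLongWritable;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;
import org.apache.mahout.cf.taste.hadoop.item.VectorOrPrefWritable;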

Example 5: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
protected void reduce(NullWritable key, Iterable<TrainingWeights> values, Context context)
    throws IOException, InterruptedException {
  TrainingWeights result = null;
  int total = 0;
  for (TrainingWeights weights : values) {
    if (result == null) {
      result = weights;
    } else {
      addWeights(result, weights);
    }
    total++;
  }
  if (total > 1) {
    divideWeights(result, total);
  }
  context.write(NullWritable.get(), result);
}
 
Developer: fancyerii, Project: chinesesegmentor, Lines: 19, Source: ParallelTraining.java

Example 6: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
protected void reduce(IntWritable key, Iterable<MyKey> values, Context context)
    throws IOException, InterruptedException {
  double w = 0;
  int total = 0;
  double[] array = new double[6];
  for (MyKey value : values) {
    total++;
    w += value.score * value.score;
    array[value.id] = value.score;
  }
  if (total != 6) {
    throw new IOException("not 6 for: " + key.get());
  }

  MyKey k = new MyKey(key.get(), w);
  MyValue v = new MyValue(array);
  context.write(k, v);
}
 
Developer: fancyerii, Project: chinesesegmentor, Lines: 20, Source: CalcFeatureWeights.java

Example 7: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void reduce(Text key, Iterable<Text> values, Context context)
    throws IOException, InterruptedException {
  // Collect the distinct documents that contain this word.
  Set<String> docs = new HashSet<String>();
  Text output = new Text();
  StringBuffer result = new StringBuffer();
  for (Text val : values) {
    docs.add(val.toString());
  }
  // Output format: "<docCount>#<doc1>#<doc2>#...".
  result.append(docs.size() + "#");
  for (String doc : docs) {
    result.append(doc + "#");
  }
  output.set(result.toString().substring(0, result.length() - 1)); // drop the trailing '#'
  context.write(key, output);
}
 
Developer: akhilesh890, Project: mutual-information-words, Lines: 18, Source: wordcountReduce.java

Example 8: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
	int countItemFreq = 0;
	
	for (IntWritable value : values){
		countItemFreq += value.get();
	}
	
	int minsup = Integer.parseInt(context.getConfiguration().get("minsup"));
	
	if (countItemFreq >= minsup)
	{
		context.write(key, new IntWritable(countItemFreq));
	}
}
 
Developer: ledgku, Project: Apriori_Hadoop, Lines: 16, Source: Reduce.java
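The minsup threshold above must be placed in the job configuration before submission. A hypothetical driver sketch (the class and job names are assumptions, not taken from Apriori_Hadoop):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class AprioriDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("minsup", "3"); // read back in reduce() via context.getConfiguration()
    Job job = Job.getInstance(conf, "apriori pass");
    // ... mapper/reducer/input/output setup elided ...
  }
}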

Example 9: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void map(Object key, Text value, Context context
                ) throws IOException, InterruptedException {
  StringTokenizer itr = new StringTokenizer(value.toString());
  while (itr.hasMoreTokens()) {
    word.set(itr.nextToken());
    context.write(word, one);
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 9, Source: MutiWordcount.java
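As in example 1, word and one are class fields that sit outside the excerpt; in the standard WordCount pattern they would presumably be declared as:

// Assumed class fields (standard WordCount pattern, not shown in the excerpt):
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();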

Example 10: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void reduce(Text key, Iterable<IntWritable> values, 
                   Context context
                   ) throws IOException, InterruptedException {
  int sum = 0;
  for (IntWritable val : values) {
    sum += val.get();
  }
  result.set(sum);
  context.write(key, result);
}
 
Developer: yncxcw, Project: big-c, Lines: 11, Source: MutiWordcount.java
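Examples 9 and 10 together form the classic WordCount job. For context, a minimal driver that wires such a mapper/reducer pair into a Job might look like the sketch below; the class names WordCountDriver, TokenizerMapper, and IntSumReducer are assumptions standing in for the classes these methods belong to.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Minimal driver sketch (assumed names, not taken from the MutiWordcount source).
public class WordCountDriver {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "word count");
    job.setJarByClass(WordCountDriver.class);
    job.setMapperClass(TokenizerMapper.class);   // the map() from example 9
    job.setCombinerClass(IntSumReducer.class);   // optional local aggregation
    job.setReducerClass(IntSumReducer.class);    // the reduce() from example 10
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}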

Example 11: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void reduce(IntWritable key, Iterable<IntWritable> values, Context context
        ) throws IOException, InterruptedException {
  int sum = 0;
  for (IntWritable val : values) {
      sum += val.get();
  }
  context.write(key, new IntWritable(sum));
}
 
Developer: yncxcw, Project: big-c, Lines: 9, Source: HistogramRatings.java

Example 12: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
public void map(LongWritable key, Text value, Context context)
    throws IOException, InterruptedException {
  String[] lineSplits = value.toString().split("\t");
  String newsID = lineSplits[1];
  String content = lineSplits[4];
  String publishTime = lineSplits[5];
  Calendar cal1 = Calendar.getInstance();
  try {
    // Parse Chinese-formatted timestamps such as "2016年05月12日09:30" into epoch millis.
    cal1.setTime(new SimpleDateFormat("yyyy年MM月dd日HH:mm").parse(publishTime));
    publishTime = Long.toString(cal1.getTimeInMillis());
  } catch (Exception e) {
    publishTime = "0"; // fall back when the timestamp is missing or malformed
  }
  context.write(new Text(newsID + "|" + publishTime + "|" + content), new Text(""));
}
 
Developer: hejy12, Project: newsRecommender, Lines: 16, Source: TFIDF2.java

Example 13: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
public void map(IntWritable r, VectorWritable v, Context context) throws IOException {
  try {
    // Emit the dot product of this row with each class weight vector,
    // keyed by "<rowIndex>_<className>".
    for (Entry<String, Vector> w : classWeights.entrySet()) {
      context.write(new Text(String.valueOf(r.get()) + "_" + w.getKey()),
          new DoubleWritable(v.get().dot(w.getValue())));
    }
  } catch (InterruptedException e) {
    e.printStackTrace(); // swallows the interrupt; rethrowing would be cleaner
  }
}
 
Developer: cdgore, Project: Ankus, Lines: 13, Source: MahoutDotProductDistributedCache.java
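The map() above depends on a classWeights field; given the source file name (MahoutDotProductDistributedCache.java), it is presumably populated from the DistributedCache in setup(). A hypothetical declaration:

// Assumed field: class name -> weight vector, presumably loaded in setup()
// from the DistributedCache (loading code not shown in the excerpt).
private Map<String, Vector> classWeights = new HashMap<String, Vector>();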

Example 14: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
public void map(LongWritable r, VectorWritable v, Context context) throws IOException {
  try {
    // Center the row by subtracting the per-column means.
    Vector newV = v.get().minus(columnMeans);
    context.write(new IntWritable((int) r.get()), new VectorWritable(newV));
  } catch (InterruptedException e) {
    e.printStackTrace(); // swallows the interrupt; rethrowing would be cleaner
  }
}
 
Developer: cdgore, Project: Ankus, Lines: 11, Source: SubtractColumnMeans.java
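Similarly, columnMeans is a field outside the excerpt; a hypothetical declaration:

// Assumed field: per-column mean vector subtracted from every row,
// presumably initialized in setup() (not shown in the excerpt).
private Vector columnMeans;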

Example 15: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the package/class the method depends on
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String line = value.toString();
	String [] words = line.split("\t");
	String [] sentence = words[1].split("=");
	String [] scores = sentence[1].split(";");
	String posScore = scores[0];
	String negScore = scores[1];
	
	context.write(new Text(words[0]), new Text(posScore + ";" + negScore));		
}
 
Developer: Avlessi, Project: SentimentAnalysis, Lines: 12, Source: CalculateBetaDriver.java


Note: The org.apache.hadoop.mapreduce.Reducer.Context.write examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by the community; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code. Do not reproduce without permission.