

Java Context Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.Reducer.Context. If you have been wondering what exactly the Context class does, how to use it, or where to find examples of it in real code, the curated class examples below may help.


The Context class is nested inside org.apache.hadoop.mapreduce.Reducer. 15 code examples of the class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
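
Before diving into the examples, here is a minimal, self-contained sketch of how a Reducer typically uses its Context to emit output, read the job Configuration, and report progress. The class name WordCountReducer, the configuration key, and the counter names are illustrative assumptions, not taken from any project below.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// A minimal, hypothetical reducer showing the common Context operations used
// throughout the examples below: write(), getConfiguration(), getCounter(), progress().
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable v : values) {
            sum += v.get();
        }
        // Read a job-level setting; the key "example.threshold" is an assumption.
        int threshold = context.getConfiguration().getInt("example.threshold", 0);
        if (sum >= threshold) {
            context.write(key, new IntWritable(sum)); // emit an output record
            context.getCounter("example", "emitted").increment(1); // bump a custom counter
        }
        context.progress(); // tell the framework this task is still alive
    }
}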

Example 1: recalVCF

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
/**
 * recalibrate one VCF file
 * @param id
 * @param context
 * @throws InterruptedException
 * @throws IOException
 */
public void recalVCF(int id, Context context) throws IOException, InterruptedException {
	long start, end;
	start = System.currentTimeMillis();
	recalTable.getRecalibrationTable();
	end = System.currentTimeMillis();
	System.err.println("recal table time:" + (end - start) / 1000 + "s");

	start = System.currentTimeMillis();
	recalTable.indexData();
	end = System.currentTimeMillis();
	System.err.println("recal table index time:" + (end - start) / 1000 + "s");
	for (final Tranche t : recalTable.getTranches()) {
		if (t.ts >= options.getTSFilterLevel()) {
			tranches.add(t);
		}
	}
	// this algorithm wants the tranches ordered from best (lowest truth sensitivity) to worst (highest truth sensitivity)
	Collections.reverse(tranches);
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 27, Source: VCFRecalibrator.java

Example 2: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void map(Object key, Text value,
    Context context) throws IOException, InterruptedException {

  String line = value.toString();
  int movieIndex = line.indexOf(":");
  if (movieIndex > 0) {
    String reviews = line.substring(movieIndex + 1);
    StringTokenizer token = new StringTokenizer(reviews, ",");
    while (token.hasMoreTokens()) {
      String tok = token.nextToken();
      int reviewIndex = tok.indexOf("_");
      String ratingStr = tok.substring(reviewIndex + 1);
      int rating = Integer.parseInt(ratingStr);
      context.write(new IntWritable(rating), one);
    }
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 23, Source: HistogramRatings.java

Example 3: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void reduce(Text key, Iterable<Text> values, Context context)
		throws IOException, InterruptedException {
	// values for the same word are grouped into the same reduce call
	float sum = 0;
	List<String> vals = new ArrayList<String>();
	for (Text str : values) {
		int index = str.toString().lastIndexOf(" ");
		sum += Integer.parseInt(str.toString().substring(index + 1)); // tally this word's occurrences across all documents
		vals.add(str.toString().substring(0, index)); // keep the remainder of the record
	}
	double tmp = Math.log10(totalArticle * 1.0 / (sum * 1.0)); // IDF: log10 of total document count divided by this word's occurrence count
	for (int j = 0; j < vals.size(); j++) {
		String val = vals.get(j);
		String newsID = val.substring(0, val.indexOf(" "));
		String end = val.substring(val.lastIndexOf(" "));
		float f_end = Float.parseFloat(end); // read the TF value
		val += " ";
		val += f_end * tmp; // the TF-IDF value
//		context.write(key, new Text(val));
		context.write(new Text(newsID), new Text(key + " " + val.substring(val.indexOf(" ") + 1)));
	}
}
 
Developer: hejy12, Project: newsRecommender, Lines: 23, Source: TFIDF2.java
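
The totalArticle variable above (the total number of documents, the N in IDF = log10(N / df)) is defined elsewhere in the class. A plausible sketch, assuming it is passed through the job Configuration under the key "totalArticle" (an assumption, not the newsRecommender project's actual code):

// Hypothetical sketch inside the same reducer class; the configuration key
// and the default value are assumptions.
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    totalArticle = context.getConfiguration().getLong("totalArticle", 1L);
}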

Example 4: generateActivityForBlock

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void generateActivityForBlock(int seed, ArrayList<Person> block, Context context) throws IOException {
    randomFarm_.resetRandomGenerators(seed);
    forumId = 0;
    messageId = 0;
    SN.machineId = seed;
    personActivitySerializer_.reset();
    int counter = 0;
    float personGenerationTime = 0.0f;
    for (Person p : block) {
        long start = System.currentTimeMillis();
        generateActivity(p, block);
        if (DatagenParams.updateStreams) {
            updateSerializer_.changePartition();
        }
        if (counter % 1000 == 0) {
            context.setStatus("Generating activity of person " + counter + " of block " + seed);
            context.progress();
        }
        float time = (System.currentTimeMillis() - start) / 1000.0f;
        personGenerationTime += time;
        counter++;
    }
    System.out.println("Average person activity generation time " + personGenerationTime / (float) block.size());
}
 
Developer: ldbc, Project: ldbc_snb_datagen, Lines: 25, Source: PersonActivityGenerator.java

Example 5: readDistributedCacheFingerprint

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@SuppressWarnings("rawtypes")
public static LocalStructure[] readDistributedCacheFingerprint(Context context, String fpid, boolean discarding) throws IOException {

	URI[] input_files = context.getCacheFiles();

	@SuppressWarnings("unchecked")
	Class<? extends LocalStructure> MatcherClass = (Class<? extends LocalStructure>) Util.getClassFromProperty(context, "matcher");

	// Compute the local structures of the input fingerprint
	// and store them so that all maps and reduces can access them.
	for (URI input_file : input_files) {
//		String[] lines = Util.readFileByLines(FilenameUtils.getName(input_file.getPath()));
		String[] lines = Util.readFileByLines(input_file.getPath());

		for (String line : lines) {
			if (LocalStructure.decodeFpid(line).equals(fpid))
				return LocalStructure.extractLocalStructures(MatcherClass, line);
		}
	}

	System.err.println("readDistributedCacheFingerprint: input fingerprint " + fpid + " not found");
	return null;
}
 
Developer: dperaltac, Project: bigdata-fingerprint, Lines: 24, Source: Util.java
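
For context, the files returned by context.getCacheFiles() above must be registered on the job before submission. A minimal driver-side sketch, assuming an existing Configuration conf; the path and job name are illustrative:

import java.net.URI;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical driver fragment: register a fingerprint file in the distributed
// cache so every map and reduce task can read it via context.getCacheFiles().
// new URI(...) throws URISyntaxException, assumed handled by the enclosing method.
Job job = Job.getInstance(conf, "fingerprint-matching");
job.addCacheFile(new URI("/data/fingerprints/input.fp"));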

Example 6: getOutfile

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public static String getOutfile(Context context) {
    try {
        if (HashPartitioner.class.isAssignableFrom(context.getPartitionerClass())) {
            String outpath = context.getConfiguration().get(HPCONFIG);
            if (outpath != null) {
                int partition = Job.getReducerId(context);
                return PrintTools.sprintf("%s/partition.%5d", outpath, partition);
            } else {
                log.info("must use setOutPath on HashPartitioner");
            }
        }
    } catch (ClassNotFoundException ex) {
        log.exception(ex, "HashPartitioner");
    }
    throw new RuntimeException("fatal");
}
 
Developer: htools, Project: htools, Lines: 17, Source: HashPartitioner.java

Example 7: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@Override
public void map(ImmutableBytesWritable row, Result value, Context context) throws InterruptedException, IOException {
    String prim_key = new String(row.get());
    for (KeyValue kv : value.raw()) {
        try {
            double norm = (double) Integer.parseInt(new String(kv.getValue())) / prim_key.length();
            //double norm = (double)Integer.parseInt(new String(kv.getValue()));
            //double norm = (double)Integer.parseInt(new String(kv.getValue())) / kv.getQualifier().toString().length();
            Text resText = new Text(prim_key + "," + String.valueOf(norm));
            String qual = new String(kv.getQualifier());
            context.write(new Text(qual), resText);
            //System.out.println("WriteIndicesMapper: w_i = " + prim_key + " w_c = " + qual + " <w_c>, <w_i, norm_ic> = " + resText);
        } catch (Exception e) {
            System.out.println("Exception in mapper for key = " + prim_key);
        }
    }
}
 
Developer: Avlessi, Project: SentimentAnalysis, Lines: 20, Source: WriteIndicesSetDriver.java
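
HBase table mappers like this one are usually wired to a job with TableMapReduceUtil. A minimal sketch, assuming an existing Job job; the table name is an assumption (the mapper class name WriteIndicesMapper is suggested by the commented-out log line above):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.Text;

// Hypothetical driver fragment: scan an HBase table and feed each row to the mapper.
Scan scan = new Scan();
TableMapReduceUtil.initTableMapperJob("indices", scan, WriteIndicesMapper.class, Text.class, Text.class, job);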

Example 8: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void map(VarLongWritable key, VectorWritable value, Context context) throws IOException, InterruptedException {
    long userID = key.get();
    Vector userVector = value.get();
    Iterator<Vector.Element> it = userVector.nonZeroes().iterator();
    IntWritable itemi = new IntWritable();
    while (it.hasNext()) {
        Vector.Element e = it.next();
        int itemIndex = e.index();
        float preferenceValue = (float) e.get();
        itemi.set(itemIndex);
        context.write(itemi, new VectorOrPrefWritable(userID, preferenceValue));
        System.out.println("item: " + itemi + ", user and val: " + userID + "," + preferenceValue);
    }
}
 
Developer: bytegriffin, Project: recsys-offline, Lines: 17, Source: Step32.java

Example 9: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@Override
protected void reduce(NullWritable key, Iterable<TrainingWeights> values, Context context)
    throws IOException, InterruptedException {
  TrainingWeights result = null;
  int total = 0;
  for (TrainingWeights weights : values) {
    if (result == null) {
      result = weights;
    } else {
      addWeights(result, weights);
    }
    total++;
  }
  if (total > 1) {
    divideWeights(result, total);
  }
  context.write(NullWritable.get(), result);
}
 
Developer: fancyerii, Project: chinesesegmentor, Lines: 19, Source: ParallelTraining.java
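
The addWeights and divideWeights helpers are not shown in the snippet. A plausible sketch, assuming TrainingWeights exposes its parameters as a double[] via a getWeights() accessor (an assumption, not the chinesesegmentor implementation):

// Hypothetical helpers: element-wise accumulation and averaging of model weights.
private static void addWeights(TrainingWeights acc, TrainingWeights other) {
    double[] a = acc.getWeights(), b = other.getWeights();
    for (int i = 0; i < a.length; i++) {
        a[i] += b[i]; // accumulate each weight
    }
}

private static void divideWeights(TrainingWeights acc, int n) {
    double[] a = acc.getWeights();
    for (int i = 0; i < a.length; i++) {
        a[i] /= n; // average over the n reducer inputs
    }
}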

Example 10: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@Override
protected void reduce(IntWritable key, Iterable<MyKey> values, Context context)
    throws IOException, InterruptedException {
  double w = 0;
  int total = 0;
  double[] array = new double[6];
  for (MyKey value : values) {
    total++;
    w += value.score * value.score;
    array[value.id] = value.score;
  }
  if (total != 6) {
    throw new IOException("not 6 for: " + key.get());
  }

  MyKey k = new MyKey(key.get(), w);
  MyValue v = new MyValue(array);
  context.write(k, v);
}
 
Developer: fancyerii, Project: chinesesegmentor, Lines: 20, Source: CalcFeatureWeights.java

Example 11: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
	Set<String> docs = new HashSet<String>();
	Text output = new Text();
	StringBuffer result = new StringBuffer();
	for (Text val : values) {
		docs.add(val.toString());
	}
	result.append(docs.size() + "#");
	for (String doc : docs) {
		result.append(doc + "#");
	}
	output.set(result.toString().substring(0, result.length() - 1));
	context.write(key, output);
}
 
Developer: akhilesh890, Project: mutual-information-words, Lines: 18, Source: wordcountReduce.java

Example 12: constructMapReport

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public boolean constructMapReport(SamRecordDatum datum, ReferenceShare genome, String chrName, Context context) {
	ChromosomeInformationShare chrInfo = genome.getChromosomeInfo(chrName);
	rTracker.setTrackerAttribute(ReadType.TOTALREADS);
	// if the read's end position (site coordinate + read length) exceeds the chromosome length, skip this read and move on to the next one
	if(datum.getEnd() >= chrInfo.getLength()) {
		context.getCounter("Exception", "read end pos more than chr end pos").increment(1);
		return false;
	}
	
	rTracker.setTrackerAttribute(ReadType.MAPPED);
	bTracker.setTrackerAttribute(BaseType.TOTALBASE.setCount(datum.getBaseCount()));
	
	if(datum.isUniqueAlignment()) {
		rTracker.setTrackerAttribute(ReadType.UNIQUE);
	}
		
	if ((datum.getFlag() & 0x400) != 0) {
		rTracker.setTrackerAttribute(ReadType.DUP);
	}
	
	if ((datum.getFlag() & 0x40) != 0 && (datum.getFlag() & 0x8) == 0) {
		rTracker.setTrackerAttribute(ReadType.PE);
	}
		
	String cigar = datum.getCigarString();
	if (cigar.contains("S") || cigar.contains("H")) {
		rTracker.setTrackerAttribute(ReadType.CLIPPED);
	}
	
	if (cigar.contains("D") || cigar.contains("I")) {
		rTracker.setTrackerAttribute(ReadType.INDEL);
	}
		
	if (isMismatch(datum, chrInfo)) {
		rTracker.setTrackerAttribute(ReadType.MISMATCHREADS);
	}
	return true;
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 39, Source: BasicReport.java

Example 13: RecalibratorContextWriter

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@SuppressWarnings({ "rawtypes", "unchecked" })
public RecalibratorContextWriter(Context ctx, boolean multiple) {
	if(multiple)
		mos = new MultipleOutputs<NullWritable, Text>(ctx);
	this.context = ctx;
	value = new SamRecordWritable();
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 8, Source: RecalibratorContextWriter.java
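
A note on the MultipleOutputs wrapped here: named outputs must be declared on the job before it runs. A minimal driver-side sketch, assuming an existing Job job; the output name "recal" and the use of TextOutputFormat are assumptions, not taken from SOAPgaea:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

// Hypothetical driver fragment: declare a named output matching the reducer-side
// MultipleOutputs<NullWritable, Text> used in the constructor above.
MultipleOutputs.addNamedOutput(job, "recal", TextOutputFormat.class, NullWritable.class, Text.class);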

Example 14: reduce

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
	int countItemFreq = 0;

	for (IntWritable value : values) {
		countItemFreq += value.get();
	}

	int minsup = Integer.parseInt(context.getConfiguration().get("minsup"));

	if (countItemFreq >= minsup) {
		context.write(key, new IntWritable(countItemFreq));
	}
}
 
Developer: ledgku, Project: Apriori_Hadoop, Lines: 16, Source: Reduce.java
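
The "minsup" value read above must be set on the job Configuration by the driver. A minimal sketch; the threshold value and job name are assumptions, not code from Apriori_Hadoop:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical driver fragment: pass the minimum-support threshold to all tasks.
Configuration conf = new Configuration();
conf.set("minsup", "100"); // illustrative threshold
Job job = Job.getInstance(conf, "apriori-frequent-items");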

Example 15: map

import org.apache.hadoop.mapreduce.Reducer.Context; // import the required package/class
public void map(Object key, Text value, Context context
                ) throws IOException, InterruptedException {
  StringTokenizer itr = new StringTokenizer(value.toString());
  while (itr.hasMoreTokens()) {
    word.set(itr.nextToken());
    context.write(word, one);
  }
}
 
Developer: yncxcw, Project: big-c, Lines: 9, Source: MutiWordcount.java


Note: the org.apache.hadoop.mapreduce.Reducer.Context class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.