

Java IntWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.IntWritable. If you are wondering what the IntWritable class is for, how to use it, or are looking for working examples, the curated class examples below may help.


The IntWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
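Before the examples, here is a minimal, self-contained sketch of the basic IntWritable API: wrapping an int, mutating it with set/get, and round-tripping it through Writable serialization. The class name IntWritableBasics and the byte-array streams are illustrative choices only, not taken from any of the projects below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.IntWritable;

public class IntWritableBasics {
  public static void main(String[] args) throws Exception {
    // Wrap a primitive int in a Writable so Hadoop can use it as a key or value.
    IntWritable writable = new IntWritable(42);
    writable.set(100);                            // Writables are mutable and reusable
    System.out.println(writable.get());           // prints 100

    // Round-trip through the Writable serialization used by MapReduce.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    writable.write(new DataOutputStream(bytes));

    IntWritable copy = new IntWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.get());               // prints 100
    System.out.println(writable.compareTo(copy)); // prints 0: the values are equal
  }
}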

Example 1: checkOuterConsistency

import org.apache.hadoop.io.IntWritable; // import the required package/class
private static void checkOuterConsistency(Job job, Path[] src) 
    throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new 
                           Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is" + outlist.length,
    1, outlist.length);
  assertTrue("output file with zero length" + outlist[0].getLen(),
    0 < outlist[0].getLen());
  SequenceFile.Reader r =
    new SequenceFile.Reader(cluster.getFileSystem(),
        outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  while (r.next(k, v)) {
    assertEquals("counts does not match", v.get(),
      countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestJoinDatamerge.java

Example 2: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
    if(args.length != 2){
        System.err.println("Usage: MaxTemperatureWithCombiner <input path> <output path>");
        System.exit(-1);
    }

    Job job = new Job();
    job.setJarByClass(MaxTemperatureWithCombiner.class);
    job.setJobName("Max Temperature With Combiner");

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setMapperClass(MaxTemperatureMapper.class);
    job.setCombinerClass(MaxTemperatureReducer.class);
    job.setReducerClass(MaxTemperatureReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: myziyue, Project: learn-to-hadoop, Lines: 23, Source: MaxTemperatureWithCombiner.java

Example 3: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterator<Text> values,
    OutputCollector<Text, Text> out,
    Reporter reporter) throws IOException {
  keyVal = key.get();
  while(values.hasNext()) {
    Text value = values.next();
    String towrite = value.toString() + "\n";
    indexStream.write(towrite.getBytes(Charsets.UTF_8));
    written++;
    if (written > numIndexes -1) {
      // every 1000 indexes we report status
      reporter.setStatus("Creating index for archives");
      reporter.progress();
      endIndex = keyVal;
      String masterWrite = startIndex + " " + endIndex + " " + startPos 
                          +  " " + indexStream.getPos() + " \n" ;
      outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      startPos = indexStream.getPos();
      startIndex = endIndex;
      written = 0;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: HadoopArchives.java

Example 4: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

    String[] keyVal = value.toString().split("\\t");
    double[] Ai = new double[Bh];
    int i = Integer.parseInt(keyVal[0]) - 1;
    String[] values = keyVal[1].split(",");
    for (int j = 0; j < values.length; j++) {
        Ai[j] = Double.parseDouble(values[j]);
    }
    double[] Ci = new double[Bw];
    StringBuilder result = new StringBuilder(prefix);

    for (int j = 0; j < Bw; j++) {
        Ci[j] = 0d;
        for (int k = 0; k < Bh; k++) {
            Ci[j] += Ai[k] * B[k][j];
        }
        result.append(Ci[j]);
        if (j != Bw - 1) {
            result.append(",");
        }
    }
    context.write(new IntWritable(i + 1), new Text(result.toString()));
}
 
Developer ID: Romm17, Project: MRNMF, Lines: 26, Source: MM1.java

Example 5: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterable<IntWritable> values, 
    Context context) throws IOException, InterruptedException {

  int errors = 0;

  MarkableIterator<IntWritable> mitr = 
    new MarkableIterator<IntWritable>(values.iterator());

  switch (key.get()) {
  case 0:
    errors += test0(key, mitr);
    break;
  case 1:
    errors += test1(key, mitr);
    break;
  case 2:
    errors += test2(key, mitr);
    break;
  case 3:
    errors += test3(key, mitr);
    break;
  default:
    break;
  }
  context.write(key, new IntWritable(errors));
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestValueIterReset.java

Example 6: collect

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
/**
 * Assigns values to the ? placeholders in the SQL statement.
 */
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value, PreparedStatement pstmt,
		IDimensionConverter converter) throws SQLException, IOException {
	StatsUserDimension statsUserDimension = (StatsUserDimension) key;
	MapWritableValue mapWritableValue = (MapWritableValue) value;
	IntWritable newInstallUsers = (IntWritable) mapWritableValue.getValue().get(new IntWritable(-1));

	int i = 0;
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getPlatform()));
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getDate()));
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getBrowser()));
	pstmt.setInt(++i, newInstallUsers.get());
	pstmt.setString(++i, conf.get(GlobalConstants.RUNNING_DATE_PARAMES));
	pstmt.setInt(++i, newInstallUsers.get());
	pstmt.addBatch();
}
 
Developer ID: liuhaozzu, Project: big_data, Lines: 20, Source: StatsDeviceBrowserNewInstallUserCollector.java

Example 7: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String doc = value.toString();
				
	String text = slice(doc, "<text", "</text>", true);
	if (text.length() < 1) return;
	
	char txt[] = text.toLowerCase().toCharArray();
	for (int i = 0; i < txt.length; ++i) {
		if (!((txt[i] >= 'a' && txt[i] <= 'z') || (txt[i] >= 'A' && txt[i] <= 'Z')))
			txt[i] = ' ';
	}
	
	String id = slice(doc, "<id>", "</id>", false);
	if (id.length() < 1) return;
	StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
	int sum = itr.countTokens();
	while (itr.hasMoreTokens()) {
		String s = itr.nextToken();
		word.set(id + '-' + s);
		IntWritable tmp[] = {new IntWritable(sum), new IntWritable(1)};
		IntArrayWritable temp = new IntArrayWritable(tmp);
		context.write(word, temp);
	}
}
 
Developer ID: lzmhhh123, Project: Wikipedia-Index, Lines: 26, Source: TF.java

Example 8: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

	String line = value.toString();

	line = line.trim().toLowerCase();
	line = line.replaceAll("[^a-z]+", " ");
	String words[] = line.split("\\s+"); //split by ' ', '\t', '\n', etc.

	if(words.length < 2) {
		return;
	}

	StringBuilder sb;
	for (int i = 0; i < words.length-1; i++) {
		sb = new StringBuilder();
		for (int j = 0;  i + j < words.length && j < noGram; j++) {
			sb.append(" ");
			sb.append(words[i + j]);
			context.write(new Text(sb.toString().trim()), new IntWritable(1));
		}
	}
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines: 23, Source: NGramLibraryBuilder.java

Example 9: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void reduce(twoDimensionIndexWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
    if(key.getMatrixKind().equals(MatrixKind.Corpus)) {
        context.write(key, values.iterator().next());
        return;
    } else if(key.getMatrixKind().equals(MatrixKind.DocTopic)||
            key.getMatrixKind().equals(MatrixKind.TopicWord)){
        int count = 0;
        for(Text text : values) {
            count += Integer.parseInt(text.toString());
        }
        if (key.getMatrixKind().equals(MatrixKind.DocTopic)) {
            writer1.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
        } else {
            writer2.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
        }
    }
    return;

}
 
Developer ID: huyang1, Project: LDA, Lines: 21, Source: InitReducer.java

Example 10: testNestedIterable

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestJoinTupleWritable.java

Example 11: createJob

import org.apache.hadoop.io.IntWritable; // import the required package/class
public Job createJob() 
throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setNumReduceTasks(1);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: CredentialsTestJob.java

Example 12: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {

	Configuration conf = new Configuration();

	Job job = Job.getInstance(conf);
	job.setMapperClass(DataDividerMapper.class);
	job.setReducerClass(DataDividerReducer.class);

	job.setJarByClass(DataDividerByUser.class);

	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	job.setOutputKeyClass(IntWritable.class);
	job.setOutputValueClass(Text.class);

	TextInputFormat.setInputPaths(job, new Path(args[0]));
	TextOutputFormat.setOutputPath(job, new Path(args[1]));

	job.waitForCompletion(true);
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines: 21, Source: DataDividerByUser.java

Example 13: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Test
public void reduce() {
    MaxTemperatureMapRed.MaxTemperatureReduce maxTemperatureReduce = new MaxTemperatureMapRed.MaxTemperatureReduce();
    try {
        List<IntWritable> list = new ArrayList<IntWritable>();
        list.add(new IntWritable(12));
        list.add(new IntWritable(31));
        list.add(new IntWritable(45));
        list.add(new IntWritable(23));
        list.add(new IntWritable(21));
        maxTemperatureReduce.reduce(new Text("1901"), list.iterator(), new OutputCollector<Text, IntWritable>() {
            @Override
            public void collect(final Text text, final IntWritable intWritable) throws IOException {
                log.info(text.toString() + "  " + intWritable.get());
            }
        }, null);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines: 21, Source: MaxTemperatureMapRedTest.java

Example 14: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	Job job = Job.getInstance(conf, "test");
	
	job.setMapperClass(testMapper.class);
	job.setPartitionerClass(testPartitioner.class);
	job.setReducerClass(testReducer.class);
	job.setNumReduceTasks(10);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(IntWritable.class);
	
	FileInputFormat.setInputPaths(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));

	if (!job.waitForCompletion(true))
		return;
}
 
Developer ID: aadishgoel2013, Project: Hadoop-Codes, Lines: 19, Source: testDriver.java

Example 15: testMapredIntervalSampler

import org.apache.hadoop.io.IntWritable; // import the required package/class
/**
 * Verify IntervalSampler in mapred.lib.InputSampler, which is added back
 * for binary compatibility of M/R 1.x
 */
@Test (timeout = 30000)
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testMapredIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  org.apache.hadoop.mapred.lib.InputSampler.Sampler<IntWritable,NullWritable>
      sampler = new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler
          <IntWritable,NullWritable>(FREQ, NUM_SAMPLES);
  int inits[] = new int[TOT_SPLITS];
  for (int i = 0; i < TOT_SPLITS; ++i) {
    inits[i] = i;
  }
  Job ignored = Job.getInstance();
  Object[] samples = sampler.getSample(new TestInputSamplerIF(
        NUM_SAMPLES, TOT_SPLITS, inits), ignored);
  assertEquals(NUM_SAMPLES, samples.length);
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i,
        ((IntWritable)samples[i]).get());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: TestInputSampler.java


Note: The org.apache.hadoop.io.IntWritable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For redistribution and use, please refer to each project's license. Do not reproduce without permission.