

Java IntWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.IntWritable. If you are wondering what the IntWritable class is for, how to use it, or what real-world IntWritable code looks like, the curated examples below should help.


The IntWritable class belongs to the org.apache.hadoop.io package. Fifteen IntWritable code examples are shown below, sorted by popularity by default.
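
Before looking at the examples, a quick orientation: IntWritable is Hadoop's mutable, serializable wrapper around a Java int, used as a MapReduce key or value type. The following minimal sketch is not taken from any of the projects below; it only exercises the public IntWritable API (set, get, write, readFields) to show the serialization round trip Hadoop performs between map and reduce:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.io.IntWritable;

public class IntWritableRoundTrip {
    public static void main(String[] args) throws Exception {
        IntWritable out = new IntWritable();
        out.set(42); // mutable: one instance can be reused across many records

        // Serialize to bytes, as Hadoop does during the shuffle.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        out.write(new DataOutputStream(buffer));

        // Deserialize into a fresh instance and read the int back.
        IntWritable in = new IntWritable();
        in.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(in.get()); // prints 42
    }
}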

Example 1: checkOuterConsistency

import org.apache.hadoop.io.IntWritable; // import the required package/class
private static void checkOuterConsistency(Job job, Path[] src) 
    throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf,
      new Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is " + outlist.length,
    1, outlist.length);
  assertTrue("output file with zero length" + outlist[0].getLen(),
    0 < outlist[0].getLen());
  SequenceFile.Reader r =
    new SequenceFile.Reader(cluster.getFileSystem(),
        outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  while (r.next(k, v)) {
    assertEquals("counts does not match", v.get(),
      countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: TestJoinDatamerge.java

Example 2: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
    if(args.length != 2){
        System.err.println("Usage: MaxTemperatureWithCombiner <input path> <output path>");
        System.exit(-1);
    }

    Job job = new Job();
    job.setJarByClass(MaxTemperatureWithCombiner.class);
    job.setJobName("Max Temperature With Combiner");

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setMapperClass(MaxTemperatureMapper.class);
    job.setCombinerClass(MaxTemperatureReducer.class);
    job.setReducerClass(MaxTemperatureReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: myziyue, Project: learn-to-hadoop, Lines of code: 23, Source: MaxTemperatureWithCombiner.java
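
Example 2 references MaxTemperatureMapper and MaxTemperatureReducer, which are not shown on this page. Below is a hypothetical minimal sketch of what those two classes could look like; the tab-separated "year<TAB>temperature" input layout is an assumption, not necessarily what the learn-to-hadoop project uses. Reusing the reducer as a combiner, as the job above does, is safe here because taking a maximum is associative and commutative and the reducer's input and output types match (Text, IntWritable):

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

class MaxTemperatureMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] parts = value.toString().split("\t"); // assumed input layout
        if (parts.length < 2) {
            return; // skip malformed lines
        }
        context.write(new Text(parts[0]), new IntWritable(Integer.parseInt(parts[1])));
    }
}

class MaxTemperatureReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int max = Integer.MIN_VALUE;
        for (IntWritable value : values) {
            max = Math.max(max, value.get()); // same logic serves as combiner and reducer
        }
        context.write(key, new IntWritable(max));
    }
}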

Example 3: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterator<Text> values,
    OutputCollector<Text, Text> out,
    Reporter reporter) throws IOException {
  keyVal = key.get();
  while(values.hasNext()) {
    Text value = values.next();
    String towrite = value.toString() + "\n";
    indexStream.write(towrite.getBytes(Charsets.UTF_8));
    written++;
    if (written > numIndexes - 1) {
      // every 1000 indexes we report status
      reporter.setStatus("Creating index for archives");
      reporter.progress();
      endIndex = keyVal;
      String masterWrite = startIndex + " " + endIndex + " " + startPos
          + " " + indexStream.getPos() + " \n";
      outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      startPos = indexStream.getPos();
      startIndex = endIndex;
      written = 0;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: HadoopArchives.java

Example 4: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

    String[] keyVal = value.toString().split("\\t");
    double[] Ai = new double[Bh];
    int i = Integer.parseInt(keyVal[0]) - 1;
    String[] values = keyVal[1].split(",");
    for (int j = 0; j < values.length; j++) {
        Ai[j] = Double.parseDouble(values[j]);
    }
    double[] Ci = new double[Bw];
    StringBuilder result = new StringBuilder(prefix);

    for (int j = 0; j < Bw; j++) {
        Ci[j] = 0d;
        for (int k = 0; k < Bh; k++) {
            Ci[j] += Ai[k] * B[k][j];
        }
        result.append(Ci[j]);
        if (j != Bw - 1) {
            result.append(",");
        }
    }
    context.write(new IntWritable(i + 1), new Text(result.toString()));
}
 
Developer ID: Romm17, Project: MRNMF, Lines of code: 26, Source: MM1.java

Example 5: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterable<IntWritable> values, 
    Context context) throws IOException, InterruptedException {

  int errors = 0;

  MarkableIterator<IntWritable> mitr = 
    new MarkableIterator<IntWritable>(values.iterator());

  switch (key.get()) {
  case 0:
    errors += test0(key, mitr);
    break;
  case 1:
    errors += test1(key, mitr);
    break;
  case 2:
    errors += test2(key, mitr);
    break;
  case 3:
    errors += test3(key, mitr);
    break;
  default:
    break;
  }
  context.write(key, new IntWritable(errors));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestValueIterReset.java

Example 6: collect

import org.apache.hadoop.io.IntWritable; // import the required package/class
/**
 * Assigns values to the "?" placeholders in the SQL statement.
 */
@Override
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value, PreparedStatement pstmt,
		IDimensionConverter converter) throws SQLException, IOException {
	StatsUserDimension statsUserDimension = (StatsUserDimension) key;
	MapWritableValue mapWritableValue = (MapWritableValue) value;
	IntWritable newInstallUsers = (IntWritable) mapWritableValue.getValue().get(new IntWritable(-1));

	int i = 0;
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getPlatform()));
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getDate()));
	pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getBrowser()));
	pstmt.setInt(++i, newInstallUsers.get());
	pstmt.setString(++i, conf.get(GlobalConstants.RUNNING_DATE_PARAMES));
	pstmt.setInt(++i, newInstallUsers.get());
	pstmt.addBatch();
}
 
Developer ID: liuhaozzu, Project: big_data, Lines of code: 20, Source: StatsDeviceBrowserNewInstallUserCollector.java

Example 7: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String doc = value.toString();
				
	String text = slice(doc, "<text", "</text>", true);
	if (text.length() < 1) return;
	
	char txt[] = text.toLowerCase().toCharArray();
	for (int i = 0; i < txt.length; ++i) {
		if (!((txt[i] >= 'a' && txt[i] <= 'z') || (txt[i] >= 'A' && txt[i] <= 'Z')))
			txt[i] = ' ';
	}
	
	String id = slice(doc, "<id>", "</id>", false);
	if (id.length() < 1) return;
	StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
	int sum = itr.countTokens();
	while (itr.hasMoreTokens()) {
		String s = itr.nextToken();
		word.set(id + '-' + s);
		IntWritable tmp[] = {new IntWritable(sum), new IntWritable(1)};
		IntArrayWritable temp = new IntArrayWritable(tmp);
		context.write(word, temp);
	}
}
 
Developer ID: lzmhhh123, Project: Wikipedia-Index, Lines of code: 26, Source: TF.java
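
The IntArrayWritable used in Example 7 is a project-specific class not shown on this page. A typical definition, reconstructed here as an assumption rather than copied from the Wikipedia-Index project, subclasses Hadoop's ArrayWritable and pins the element class, since the no-argument constructor is what deserialization uses to instantiate elements:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public class IntArrayWritable extends ArrayWritable {
    public IntArrayWritable() {
        super(IntWritable.class); // required for deserialization
    }

    public IntArrayWritable(Writable[] values) {
        super(IntWritable.class, values);
    }
}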

Example 8: map

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String line = value.toString();

	line = line.trim().toLowerCase();
	line = line.replaceAll("[^a-z]+", " ");
	String[] words = line.split("\\s+"); // split by ' ', '\t', '\n', etc.

	if (words.length < 2) {
		return;
	}

	StringBuilder sb;
	for (int i = 0; i < words.length - 1; i++) {
		sb = new StringBuilder();
		for (int j = 0; i + j < words.length && j < noGram; j++) {
			sb.append(" ");
			sb.append(words[i + j]);
			context.write(new Text(sb.toString().trim()), new IntWritable(1));
		}
	}
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines of code: 23, Source: NGramLibraryBuilder.java
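
The mapper in Example 8 emits each n-gram with a count of 1; the matching reducer is not shown on this page. A minimal sketch, assuming it simply sums the counts (the class name NGramCountReducer is hypothetical), could look like this, and the same class could also be registered as the job's combiner:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

class NGramCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        context.write(key, new IntWritable(sum));
    }
}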

Example 9: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void reduce(twoDimensionIndexWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
    if (key.getMatrixKind().equals(MatrixKind.Corpus)) {
        context.write(key, values.iterator().next());
        return;
    } else if (key.getMatrixKind().equals(MatrixKind.DocTopic) ||
            key.getMatrixKind().equals(MatrixKind.TopicWord)) {
        int count = 0;
        for (Text text : values) {
            count += Integer.parseInt(text.toString());
        }
        if (key.getMatrixKind().equals(MatrixKind.DocTopic)) {
            writer1.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
        } else {
            writer2.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
        }
    }
}
 
Developer ID: huyang1, Project: LDA, Lines of code: 21, Source: InitReducer.java

Example 10: testNestedIterable

import org.apache.hadoop.io.IntWritable; // import the required package/class
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: TestJoinTupleWritable.java

Example 11: createJob

import org.apache.hadoop.io.IntWritable; // import the required package/class
public Job createJob() 
throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: CredentialsTestJob.java

Example 12: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();

	Job job = Job.getInstance(conf);
	job.setMapperClass(DataDividerMapper.class);
	job.setReducerClass(DataDividerReducer.class);

	job.setJarByClass(DataDividerByUser.class);

	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	job.setOutputKeyClass(IntWritable.class);
	job.setOutputValueClass(Text.class);

	TextInputFormat.setInputPaths(job, new Path(args[0]));
	TextOutputFormat.setOutputPath(job, new Path(args[1]));

	job.waitForCompletion(true);
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines of code: 21, Source: DataDividerByUser.java

Example 13: reduce

import org.apache.hadoop.io.IntWritable; // import the required package/class
@Test
public void reduce() {
    MaxTemperatureMapRed.MaxTemperatureReduce maxTemperatureReduce = new MaxTemperatureMapRed.MaxTemperatureReduce();
    try {
        List<IntWritable> list = new ArrayList<IntWritable>();
        list.add(new IntWritable(12));
        list.add(new IntWritable(31));
        list.add(new IntWritable(45));
        list.add(new IntWritable(23));
        list.add(new IntWritable(21));
        maxTemperatureReduce.reduce(new Text("1901"), list.iterator(), new OutputCollector<Text, IntWritable>() {
            @Override
            public void collect(final Text text, final IntWritable intWritable) throws IOException {
                log.info(text.toString() + "  " + intWritable.get());
            }
        }, null);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines of code: 21, Source: MaxTemperatureMapRedTest.java

Example 14: main

import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	Job job = Job.getInstance(conf, "test");
	
	job.setMapperClass(testMapper.class);
	job.setPartitionerClass(testPartitioner.class);
	job.setReducerClass(testReducer.class);
	job.setNumReduceTasks(10);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(IntWritable.class);
	
	FileInputFormat.setInputPaths(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));

	if (!job.waitForCompletion(true))
		return;
}
 
Developer ID: aadishgoel2013, Project: Hadoop-Codes, Lines of code: 19, Source: testDriver.java

Example 15: testMapredIntervalSampler

import org.apache.hadoop.io.IntWritable; // import the required package/class
/**
 * Verify IntervalSampler in mapred.lib.InputSampler, which is added back
 * for binary compatibility of M/R 1.x
 */
@Test (timeout = 30000)
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testMapredIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  org.apache.hadoop.mapred.lib.InputSampler.Sampler<IntWritable,NullWritable>
      sampler = new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler
          <IntWritable,NullWritable>(FREQ, NUM_SAMPLES);
  int inits[] = new int[TOT_SPLITS];
  for (int i = 0; i < TOT_SPLITS; ++i) {
    inits[i] = i;
  }
  Job ignored = Job.getInstance();
  Object[] samples = sampler.getSample(new TestInputSamplerIF(
        NUM_SAMPLES, TOT_SPLITS, inits), ignored);
  assertEquals(NUM_SAMPLES, samples.length);
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i,
        ((IntWritable)samples[i]).get());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source: TestInputSampler.java


Note: The org.apache.hadoop.io.IntWritable class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.