This article compiles typical usage examples of the Java class org.apache.hadoop.io.IntWritable. If you are unsure what IntWritable is for or how to use it, the selected class code examples below may help.
The IntWritable class belongs to the org.apache.hadoop.io package. A total of 15 IntWritable code examples are shown below, sorted by popularity by default.
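Before the examples, here is a minimal, self-contained sketch (not taken from the examples below) of the IntWritable basics that the snippets rely on: constructing a value, reading it with get(), updating it with set(), and round-tripping it through Hadoop's Writable serialization via DataOutputBuffer and DataInputBuffer. The class name IntWritableBasics and the sample values are illustrative only.
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class IntWritableBasics {
    public static void main(String[] args) throws Exception {
        // construct and mutate a writable boxed int
        IntWritable temperature = new IntWritable(42);
        temperature.set(45);
        System.out.println("value = " + temperature.get());            // 45

        // IntWritable is a WritableComparable, which is why it can serve as a MapReduce key
        System.out.println(temperature.compareTo(new IntWritable(45))); // 0

        // serialize to an in-memory buffer
        DataOutputBuffer out = new DataOutputBuffer();
        temperature.write(out);

        // deserialize into a fresh instance
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        IntWritable copy = new IntWritable();
        copy.readFields(in);
        System.out.println("copy = " + copy.get());                     // 45
    }
}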
Example 1: checkOuterConsistency
import org.apache.hadoop.io.IntWritable; // import the required package/class
private static void checkOuterConsistency(Job job, Path[] src)
throws IOException {
Path outf = FileOutputFormat.getOutputPath(job);
FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new
Utils.OutputFileUtils.OutputFilesFilter());
assertEquals("number of part files is more than 1. It is" + outlist.length,
1, outlist.length);
assertTrue("output file with zero length" + outlist[0].getLen(),
0 < outlist[0].getLen());
SequenceFile.Reader r =
new SequenceFile.Reader(cluster.getFileSystem(),
outlist[0].getPath(), job.getConfiguration());
IntWritable k = new IntWritable();
IntWritable v = new IntWritable();
while (r.next(k, v)) {
assertEquals("counts does not match", v.get(),
countProduct(k, src, job.getConfiguration()));
}
r.close();
}
Example 2: main
import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
if(args.length != 2){
System.err.println("Usage: MaxTemperatureWithCombiner <input path> <output path>");
System.exit(-1);
}
Job job = new Job();
job.setJarByClass(MaxTemperatureWithCombiner.class);
job.setJobName("Max Temperature With Combiner");
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setMapperClass(MaxTemperatureMapper.class);
job.setCombinerClass(MaxTemperatureReducer.class);
job.setReducerClass(MaxTemperatureReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
Example 3: reduce
import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterator<Text> values,
OutputCollector<Text, Text> out,
Reporter reporter) throws IOException {
keyVal = key.get();
while(values.hasNext()) {
Text value = values.next();
String towrite = value.toString() + "\n";
indexStream.write(towrite.getBytes(Charsets.UTF_8));
written++;
if (written > numIndexes - 1) {
// after every numIndexes entries, write a master index entry and report status
reporter.setStatus("Creating index for archives");
reporter.progress();
endIndex = keyVal;
String masterWrite = startIndex + " " + endIndex + " " + startPos
+ " " + indexStream.getPos() + " \n" ;
outStream.write(masterWrite.getBytes(Charsets.UTF_8));
startPos = indexStream.getPos();
startIndex = endIndex;
written = 0;
}
}
}
Example 4: map
import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// each input line is "<rowIndex>\t<comma-separated row of matrix A>"
String[] keyVal = value.toString().split("\\t");
double[] Ai = new double[Bh];
int i = Integer.parseInt(keyVal[0]) - 1;
String[] values = keyVal[1].split(",");
for (int j = 0; j < values.length; j++) {
Ai[j] = Double.parseDouble(values[j]);
}
// Ci = Ai * B: compute row i of the product matrix and emit it as a comma-separated string
double[] Ci = new double[Bw];
StringBuilder result = new StringBuilder(prefix);
for (int j = 0; j < Bw; j++) {
Ci[j] = 0d;
for (int k = 0; k < Bh; k++) {
Ci[j] += Ai[k] * B[k][j];
}
result.append(Ci[j]);
if (j != Bw - 1) {
result.append(",");
}
}
context.write(new IntWritable(i + 1), new Text(result.toString()));
}
Example 5: reduce
import org.apache.hadoop.io.IntWritable; // import the required package/class
public void reduce(IntWritable key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int errors = 0;
MarkableIterator<IntWritable> mitr =
new MarkableIterator<IntWritable>(values.iterator());
switch (key.get()) {
case 0:
errors += test0(key, mitr);
break;
case 1:
errors += test1(key, mitr);
break;
case 2:
errors += test2(key, mitr);
break;
case 3:
errors += test3(key, mitr);
break;
default:
break;
}
context.write(key, new IntWritable(errors));
}
Example 6: collect
import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
/**
 * Assigns values to the ? placeholders in the SQL statement.
 */
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value, PreparedStatement pstmt,
IDimensionConverter converter) throws SQLException, IOException {
StatsUserDimension statsUserDimension = (StatsUserDimension) key;
MapWritableValue mapWritableValue = (MapWritableValue) value;
IntWritable newInstallUsers = (IntWritable) mapWritableValue.getValue().get(new IntWritable(-1));
int i = 0;
pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getPlatform()));
pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getStatsCommon().getDate()));
pstmt.setInt(++i, converter.getDimensionIdByValue(statsUserDimension.getBrowser()));
pstmt.setInt(++i, newInstallUsers.get());
pstmt.setString(++i, conf.get(GlobalConstants.RUNNING_DATE_PARAMES));
pstmt.setInt(++i, newInstallUsers.get());
pstmt.addBatch();
}
Example 7: map
import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// each value holds one wiki page; extract the <text> body and the page <id>
String doc = value.toString();
String text = slice(doc, "<text", "</text>", true);
if (text.length() < 1) return;
char txt[] = text.toLowerCase().toCharArray();
for (int i = 0; i < txt.length; ++i) {
if (!((txt[i] >= 'a' && txt[i] <= 'z') || (txt[i] >= 'A' && txt[i] <= 'Z')))
txt[i] = ' ';
}
String id = slice(doc, "<id>", "</id>", false);
if (id.length() < 1) return;
StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
int sum = itr.countTokens();
while (itr.hasMoreTokens()) {
String s = itr.nextToken();
word.set(id + '-' + s);
IntWritable tmp[] = {new IntWritable(sum), new IntWritable(1)};
IntArrayWritable temp = new IntArrayWritable(tmp);
context.write(word, temp);
}
}
Example 8: map
import org.apache.hadoop.io.IntWritable; // import the required package/class
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
line = line.trim().toLowerCase();
line = line.replaceAll("[^a-z]+", " ");
String words[] = line.split("\\s+"); //split by ' ', '\t', '\n', etc.
if(words.length < 2) {
return;
}
StringBuilder sb;
for (int i = 0; i < words.length-1; i++) {
sb = new StringBuilder();
for (int j = 0; i + j < words.length && j < noGram; j++) {
sb.append(" ");
sb.append(words[i + j]);
context.write(new Text(sb.toString().trim()), new IntWritable(1));
}
}
}
Example 9: reduce
import org.apache.hadoop.io.IntWritable; // import the required package/class
@Override
protected void reduce(twoDimensionIndexWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
if(key.getMatrixKind().equals(MatrixKind.Corpus)) {
context.write(key, values.iterator().next());
return ;
} else if(key.getMatrixKind().equals(MatrixKind.DocTopic)||
key.getMatrixKind().equals(MatrixKind.TopicWord)){
int count = 0;
for(Text text : values) {
count += Integer.parseInt(text.toString());
}
if (key.getMatrixKind().equals(MatrixKind.DocTopic)) {
writer1.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
} else {
writer2.append(new twoDimensionIndexWritable(key.getM(), key.getN()), new IntWritable(count));
}
}
return;
}
Example 10: testNestedIterable
import org.apache.hadoop.io.IntWritable; // import the required package/class
public void testNestedIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
new BooleanWritable(r.nextBoolean()),
new FloatWritable(r.nextFloat()),
new FloatWritable(r.nextFloat()),
new IntWritable(r.nextInt()),
new LongWritable(r.nextLong()),
new BytesWritable("dingo".getBytes()),
new LongWritable(r.nextLong()),
new IntWritable(r.nextInt()),
new BytesWritable("yak".getBytes()),
new IntWritable(r.nextInt())
};
TupleWritable sTuple = makeTuple(writs);
assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
Example 11: createJob
import org.apache.hadoop.io.IntWritable; // import the required package/class
public Job createJob()
throws IOException {
Configuration conf = getConf();
conf.setInt(MRJobConfig.NUM_MAPS, 1);
Job job = Job.getInstance(conf, "test");
job.setNumReduceTasks(1);
job.setJarByClass(CredentialsTestJob.class);
job.setNumReduceTasks(1);
job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(NullWritable.class);
job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
job.setInputFormatClass(SleepJob.SleepInputFormat.class);
job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setSpeculativeExecution(false);
job.setJobName("test job");
FileInputFormat.addInputPath(job, new Path("ignored"));
return job;
}
Example 12: main
import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setMapperClass(DataDividerMapper.class);
job.setReducerClass(DataDividerReducer.class);
job.setJarByClass(DataDividerByUser.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(Text.class);
TextInputFormat.setInputPaths(job, new Path(args[0]));
TextOutputFormat.setOutputPath(job, new Path(args[1]));
job.waitForCompletion(true);
}
Example 13: reduce
import org.apache.hadoop.io.IntWritable; // import the required package/class
@Test
public void reduce() {
MaxTemperatureMapRed.MaxTemperatureReduce maxTemperatureReduce = new MaxTemperatureMapRed.MaxTemperatureReduce();
try {
List<IntWritable> list = new ArrayList<IntWritable>();
list.add(new IntWritable(12));
list.add(new IntWritable(31));
list.add(new IntWritable(45));
list.add(new IntWritable(23));
list.add(new IntWritable(21));
maxTemperatureReduce.reduce(new Text("1901"), list.iterator(), new OutputCollector<Text, IntWritable>() {
@Override
public void collect(final Text text, final IntWritable intWritable) throws IOException {
log.info(text.toString() + " " + intWritable.get());
}
}, null);
} catch (IOException e) {
e.printStackTrace();
}
}
Example 14: main
import org.apache.hadoop.io.IntWritable; // import the required package/class
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "test");
job.setMapperClass(testMapper.class);
job.setPartitionerClass(testPartitioner.class);
job.setReducerClass(testReducer.class);
job.setNumReduceTasks(10);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
if (!job.waitForCompletion(true))
return;
}
Example 15: testMapredIntervalSampler
import org.apache.hadoop.io.IntWritable; // import the required package/class
/**
* Verify IntervalSampler in mapred.lib.InputSampler, which is added back
* for binary compatibility of M/R 1.x
*/
@Test (timeout = 30000)
@SuppressWarnings("unchecked") // IntWritable comparator not typesafe
public void testMapredIntervalSampler() throws Exception {
final int TOT_SPLITS = 16;
final int PER_SPLIT_SAMPLE = 4;
final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
final double FREQ = 1.0 / TOT_SPLITS;
org.apache.hadoop.mapred.lib.InputSampler.Sampler<IntWritable,NullWritable>
sampler = new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler
<IntWritable,NullWritable>(FREQ, NUM_SAMPLES);
int inits[] = new int[TOT_SPLITS];
for (int i = 0; i < TOT_SPLITS; ++i) {
inits[i] = i;
}
Job ignored = Job.getInstance();
Object[] samples = sampler.getSample(new TestInputSamplerIF(
NUM_SAMPLES, TOT_SPLITS, inits), ignored);
assertEquals(NUM_SAMPLES, samples.length);
Arrays.sort(samples, new IntWritable.Comparator());
for (int i = 0; i < NUM_SAMPLES; ++i) {
assertEquals(i,
((IntWritable)samples[i]).get());
}
}