This article collects typical usage examples of the Java method org.apache.hadoop.io.IntWritable.set. If you have been wondering what exactly IntWritable.set does, how to use it, or where to find examples of it, the curated method code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.io.IntWritable.
Below are 9 code examples of the IntWritable.set method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Java code examples.
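Before diving into the examples, here is a minimal, self-contained sketch (not taken from any example below) of the pattern they all share: an IntWritable is a mutable, reusable box around a Java int, and set simply replaces the value it holds.

import org.apache.hadoop.io.IntWritable;

public class IntWritableSetDemo {
  public static void main(String[] args) {
    // One instance can be reused: set() replaces the held value in place.
    IntWritable n = new IntWritable();
    n.set(42);
    System.out.println(n.get()); // 42
    n.set(n.get() + 1);          // in-place update, no new object allocated
    System.out.println(n.get()); // 43
  }
}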
Example 1: map
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public void map(IntWritable key, TupleWritable val, Context context)
    throws IOException, InterruptedException {
  int k = key.get();
  final String kvstr = "Unexpected tuple: " + stringify(key, val);
  assertTrue(kvstr, 0 == k % (srcs * srcs));
  for (int i = 0; i < val.size(); ++i) {
    final int vali = ((IntWritable)val.get(i)).get();
    assertTrue(kvstr, (vali - i) * srcs == 10 * k);
  }
  context.write(key, one);
  // If the user modifies the key or any of the values in the tuple, it
  // should not affect the rest of the join.
  key.set(-1);
  if (val.has(0)) {
    ((IntWritable)val.get(0)).set(0);
  }
}
Example 2: getBlock
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
private static String getBlock(String str, char open, char close,
    IntWritable index) throws ParseException {
  StringBuilder split = new StringBuilder();
  int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR,
      index.get(), split);
  split.setLength(0); // clear the buffer
  if (next >= 0) {
    ++next; // move over '('
    next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR,
        next, split);
    if (next >= 0) {
      ++next; // move over ')'
      index.set(next);
      return split.toString(); // found a block
    } else {
      throw new ParseException("Unexpected end of block", next);
    }
  }
  return null; // found nothing
}
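To make the cursor pattern concrete, here is a hypothetical driver (assuming getBlock is visible from the call site; it is private in its defining class). The IntWritable index is both input, telling the parser where to resume scanning, and output, set just past the closing delimiter on success.

// Hypothetical usage sketch, not from the original source.
IntWritable cursor = new IntWritable(0);
String s = "foo(one)bar(two)";
String first = getBlock(s, '(', ')', cursor);  // "one"; cursor moved past ')'
String second = getBlock(s, '(', ')', cursor); // "two"
String third = getBlock(s, '(', ')', cursor);  // null: no more blocks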
Example 3: write
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public void write(DataOutput dataOutput) throws IOException {
  Text text = new Text(wifiProb == null ? "" : wifiProb);
  text.write(dataOutput);
  IntWritable intWritable = new IntWritable();
  intWritable.set(inNoOutWifi);
  intWritable.write(dataOutput);
  intWritable.set(inNoOutStore);
  intWritable.write(dataOutput);
  intWritable.set(outNoInWifi);
  intWritable.write(dataOutput);
  intWritable.set(outNoInStore);
  intWritable.write(dataOutput);
  intWritable.set(inAndOutWifi);
  intWritable.write(dataOutput);
  intWritable.set(inAndOutStore);
  intWritable.write(dataOutput);
  intWritable.set(stayInWifi);
  intWritable.write(dataOutput);
  intWritable.set(stayInStore);
  intWritable.write(dataOutput);
  DoubleWritable doubleWritable = new DoubleWritable();
  doubleWritable.set(jumpRate);
  doubleWritable.write(dataOutput);
  doubleWritable.set(deepVisit);
  doubleWritable.write(dataOutput);
  doubleWritable.set(inStoreRate);
  doubleWritable.write(dataOutput);
}
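The Writable contract requires a matching readFields method that consumes fields in exactly the order write produced them. The enclosing class is not shown here, so the following counterpart is a sketch, assuming the field names used in write above.

// Hypothetical deserialization side, reusing one buffer per Writable type.
public void readFields(DataInput dataInput) throws IOException {
  Text text = new Text();
  text.readFields(dataInput);
  wifiProb = text.toString();
  IntWritable iw = new IntWritable();
  iw.readFields(dataInput); inNoOutWifi = iw.get();
  iw.readFields(dataInput); inNoOutStore = iw.get();
  iw.readFields(dataInput); outNoInWifi = iw.get();
  iw.readFields(dataInput); outNoInStore = iw.get();
  iw.readFields(dataInput); inAndOutWifi = iw.get();
  iw.readFields(dataInput); inAndOutStore = iw.get();
  iw.readFields(dataInput); stayInWifi = iw.get();
  iw.readFields(dataInput); stayInStore = iw.get();
  DoubleWritable dw = new DoubleWritable();
  dw.readFields(dataInput); jumpRate = dw.get();
  dw.readFields(dataInput); deepVisit = dw.get();
  dw.readFields(dataInput); inStoreRate = dw.get();
}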
Example 4: writeSimpleSrc
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
private static Path[] writeSimpleSrc(Path testdir, Configuration conf,
    int srcs) throws IOException {
  SequenceFile.Writer[] out = null;
  Path[] src = new Path[srcs];
  try {
    out = createWriters(testdir, conf, srcs, src);
    final int capacity = srcs * 2 + 1;
    IntWritable key = new IntWritable();
    IntWritable val = new IntWritable();
    for (int k = 0; k < capacity; ++k) {
      for (int i = 0; i < srcs; ++i) {
        key.set(k % srcs == 0 ? k * srcs : k * srcs + i);
        val.set(10 * k + i);
        out[i].append(key, val);
        if (i == k) {
          // add duplicate key
          out[i].append(key, val);
        }
      }
    }
  } finally {
    if (out != null) {
      for (int i = 0; i < srcs; ++i) {
        if (out[i] != null)
          out[i].close();
      }
    }
  }
  return src;
}
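Reusing one key/val pair across append calls is safe because SequenceFile.Writer serializes the values immediately. Reading one of the generated files back works the same way, again recycling a single pair; the following is an illustrative sketch, assuming fs and conf are in scope.

// Illustrative reader, not part of the original test.
IntWritable key = new IntWritable();
IntWritable val = new IntWritable();
SequenceFile.Reader in = new SequenceFile.Reader(fs, src[0], conf);
while (in.next(key, val)) { // next() overwrites key/val in place
  System.out.println(key.get() + "\t" + val.get());
}
in.close();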
Example 5: map
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public void map(LongWritable key, Text value, Context context)
    throws IOException, InterruptedException {
  IntWritable outKey = new IntWritable();
  IntWritable outValue = new IntWritable();
  for (int j = 0; j < NUM_TESTS; j++) {
    for (int i = 0; i < NUM_VALUES; i++) {
      outKey.set(j);
      outValue.set(i);
      context.write(outKey, outValue);
    }
  }
}
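Since set overwrites in place, a common refinement (illustrative, not in the original source) is to hoist the two Writables into mapper fields, so they are created once per task rather than once per map call:

// Hypothetical field declarations replacing the per-call allocations above.
private final IntWritable outKey = new IntWritable();
private final IntWritable outValue = new IntWritable();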
Example 6: hostFailed
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public synchronized void hostFailed(String hostname) {
  if (hostFailures.containsKey(hostname)) {
    IntWritable x = hostFailures.get(hostname);
    x.set(x.get() + 1);
  } else {
    hostFailures.put(hostname, new IntWritable(1));
  }
}
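The same counter idiom in isolation (a standalone sketch, not the original class): because IntWritable is a mutable box, bumping a count mutates the stored object and never has to re-insert into the map.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.IntWritable;

class FailureCounter {
  private final Map<String, IntWritable> counts = new HashMap<>();

  synchronized void bump(String key) {
    IntWritable c = counts.get(key);
    if (c == null) {
      counts.put(key, new IntWritable(1)); // first failure for this key
    } else {
      c.set(c.get() + 1);                  // mutate in place, no re-insert
    }
  }
}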
Example 7: binaryProtocolStub
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public void binaryProtocolStub() {
  try {
    initSoket();
    // output code
    WritableUtils.writeVInt(dataOut, 50);
    IntWritable wt = new IntWritable();
    wt.set(123);
    writeObject(wt, dataOut);
    writeObject(new Text("value"), dataOut);
    // PARTITIONED_OUTPUT
    WritableUtils.writeVInt(dataOut, 51);
    WritableUtils.writeVInt(dataOut, 0);
    writeObject(wt, dataOut);
    writeObject(new Text("value"), dataOut);
    // STATUS
    WritableUtils.writeVInt(dataOut, 52);
    Text.writeString(dataOut, "PROGRESS");
    dataOut.flush();
    // progress
    WritableUtils.writeVInt(dataOut, 53);
    dataOut.writeFloat(0.55f);
    // register counter
    WritableUtils.writeVInt(dataOut, 55);
    // id
    WritableUtils.writeVInt(dataOut, 0);
    Text.writeString(dataOut, "group");
    Text.writeString(dataOut, "name");
    // increment counter
    WritableUtils.writeVInt(dataOut, 56);
    WritableUtils.writeVInt(dataOut, 0);
    WritableUtils.writeVLong(dataOut, 2);
    // map item
    int intValue = WritableUtils.readVInt(dataInput);
    System.out.println("intValue:" + intValue);
    IntWritable iw = new IntWritable();
    readObject(iw, dataInput);
    System.out.println("key:" + iw.get());
    Text txt = new Text();
    readObject(txt, dataInput);
    System.out.println("value:" + txt.toString());
    // done
    // end of session
    WritableUtils.writeVInt(dataOut, 54);
    System.out.println("finish");
    dataOut.flush();
    dataOut.close();
  } catch (Exception x) {
    x.printStackTrace();
  } finally {
    closeSoket();
  }
}
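The stub frames each command as a vint opcode followed by Writable payloads. Below is a minimal round trip of that raw vint/Writable pairing using in-memory streams instead of the stub's socket; note that the stub's writeObject helper may add its own framing, so this only illustrates the underlying primitives.

import java.io.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableUtils;

ByteArrayOutputStream buf = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(buf);
WritableUtils.writeVInt(out, 50);  // opcode
new IntWritable(123).write(out);   // payload

DataInputStream in = new DataInputStream(
    new ByteArrayInputStream(buf.toByteArray()));
int cmd = WritableUtils.readVInt(in); // 50
IntWritable v = new IntWritable();
v.readFields(in);                     // v.get() == 123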
Example 8: copyFailed
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
    boolean readError, boolean connectExcpt) {
  host.penalize();
  int failures = 1;
  if (failureCounts.containsKey(mapId)) {
    IntWritable x = failureCounts.get(mapId);
    x.set(x.get() + 1);
    failures = x.get();
  } else {
    failureCounts.put(mapId, new IntWritable(1));
  }
  String hostname = host.getHostName();
  IntWritable hostFailedNum = hostFailures.get(hostname);
  // MAPREDUCE-6361: the hostname's entry could be cleaned out of hostFailures
  // by another thread running copySucceeded. In that case, add the hostname
  // back to hostFailures to avoid an NPE below.
  if (hostFailedNum == null) {
    hostFailures.put(hostname, new IntWritable(1));
  }
  // report failure if we have already retried maxHostFailures times
  boolean hostFail = hostFailures.get(hostname).get() > getMaxHostFailures();
  if (failures >= abortFailureLimit) {
    try {
      throw new IOException(failures + " failures downloading " + mapId);
    } catch (IOException ie) {
      reporter.reportException(ie);
    }
  }
  checkAndInformMRAppMaster(failures, mapId, readError, connectExcpt,
      hostFail);
  checkReducerHealth();
  long delay = (long) (INITIAL_PENALTY *
      Math.pow(PENALTY_GROWTH_RATE, failures));
  if (delay > maxDelay) {
    delay = maxDelay;
  }
  penalties.add(new Penalty(host, delay));
  failedShuffleCounter.increment(1);
}
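For a sense of the backoff: the delay grows as INITIAL_PENALTY * PENALTY_GROWTH_RATE^failures, capped at maxDelay. With illustrative constants of 10000 ms and 1.3 (the real values live in the enclosing scheduler class), three failures yield roughly 10000 * 1.3^3, about 22 seconds, before the host is retried.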
Example 9: writeInt
import org.apache.hadoop.io.IntWritable; // import the package/class this method depends on
/** Write the int value. */
static void writeInt(int value, DataOutputStream out) throws IOException {
  IntWritable uInt = TL_DATA.get().U_INT;
  uInt.set(value);
  uInt.write(out);
}
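TL_DATA is not shown in this excerpt; a plausible shape (names assumed from the usage above) is a per-thread holder of reusable Writables, so concurrent callers never share or re-allocate the buffer:

// Hypothetical thread-local holder matching the TL_DATA.get().U_INT usage.
private static final class TLData {
  final IntWritable U_INT = new IntWritable();
}
private static final ThreadLocal<TLData> TL_DATA =
    ThreadLocal.withInitial(TLData::new);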