This article compiles typical usage examples of the TaskInputOutputContext.write method from the Java class org.apache.hadoop.mapreduce.TaskInputOutputContext. If you are wondering what TaskInputOutputContext.write does, how to use it, or where to find examples, the curated method samples below may help. You can also read further about the enclosing class, org.apache.hadoop.mapreduce.TaskInputOutputContext.
The following presents 4 code examples of TaskInputOutputContext.write, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
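For orientation before the collected examples, here is a minimal sketch of the most common call site of TaskInputOutputContext.write: the Context passed to a Mapper (or Reducer) implements this interface, and each write(key, value) emits one key-value pair. The class and field names below are illustrative assumptions, not code from any of the examples.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper: emits ("length", lineLength) for every input line.
public class LineLengthMapper
    extends Mapper<LongWritable, Text, Text, IntWritable> {

  private final Text outKey = new Text("length");
  private final IntWritable outValue = new IntWritable();

  @Override
  protected void map(LongWritable offset, Text line, Context context)
      throws IOException, InterruptedException {
    outValue.set(line.getLength());
    // Context implements TaskInputOutputContext, so this is the same
    // write(KEYOUT, VALUEOUT) method used by the examples below.
    context.write(outKey, outValue);
  }
}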
Example 1: compute
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.TaskInputOutputContext; // import the package/class the method depends on

// LOG, Summation, and TaskResult come from the enclosing example class (not shown).
/** Compute sigma */
static void compute(Summation sigma,
    TaskInputOutputContext<?, ?, NullWritable, TaskResult> context
    ) throws IOException, InterruptedException {
  String s;
  LOG.info(s = "sigma=" + sigma);
  context.setStatus(s);

  final long start = System.currentTimeMillis();
  sigma.compute();
  final long duration = System.currentTimeMillis() - start;
  final TaskResult result = new TaskResult(sigma, duration);

  LOG.info(s = "result=" + result);
  context.setStatus(s);
  // Emit the single result of this task; NullWritable collapses all keys.
  context.write(NullWritable.get(), result);
}
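For context, a sketch of how compute(...) might be driven from a map task. This wiring is an assumption, not part of the snippet above: in Hadoop's distributed pi example a SummationWritable wrapper carries the Summation as the map input value, and its getElement() accessor is assumed here.

import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper; assumes SummationWritable wraps a Summation so it
// can travel as the map input value.
static class SummingMapper
    extends Mapper<NullWritable, SummationWritable, NullWritable, TaskResult> {
  @Override
  protected void map(NullWritable key, SummationWritable value, Context context)
      throws IOException, InterruptedException {
    // compute(...) itself calls context.write(NullWritable.get(), result).
    compute(value.getElement(), context);
  }
}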
Example 2: dispatchMapReduce
import java.io.IOException;
import org.apache.hadoop.mapreduce.TaskInputOutputContext; // import the package/class the method depends on
import org.mozilla.javascript.Function;
import org.mozilla.javascript.NativeGenerator;
import org.mozilla.javascript.Scriptable;

// Tuple and callMapReduce(...) are defined by the enclosing class (not shown).
/**
 * Call the map-reduce JavaScript function with the given arguments and
 * save the key-value result in the task's context.
 * @param context task context to write results to
 * @param f the map-reduce JavaScript function
 * @param thisObj the JavaScript "this" object for the call
 * @param args the key-value arguments
 * @param key the output key, filled in by the call
 * @param value the output value, filled in by the call
 */
@SuppressWarnings("unchecked")
public void dispatchMapReduce(TaskInputOutputContext context,
    Function f,
    Scriptable thisObj,
    Object[] args,
    Tuple key,
    Tuple value)
    throws IOException, InterruptedException {
  Object ret = callMapReduce(f, thisObj, args, key, value);
  if (ret instanceof NativeGenerator) {
    // The function is a generator: drain it, writing one pair per yield.
    NativeGenerator gen = (NativeGenerator) ret;
    Function next = (Function) gen.getProperty(gen, "next");
    while (callMapReduce(next, gen, null, key, value) != null) {
      context.write(key, value);
    }
  } else if (ret != null) {
    context.write(key, value);
  }
}
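A sketch of how a generator-returning map function might be prepared on the Rhino side before being handed to dispatchMapReduce. The harness below uses Rhino's standard embedding API and is an assumption, not code from the source project; setLanguageVersion(Context.VERSION_1_7) is needed because yield is a JavaScript 1.7 feature in Rhino.

import org.mozilla.javascript.Context;
import org.mozilla.javascript.Function;
import org.mozilla.javascript.Scriptable;

// Hypothetical helper: compile a JavaScript generator function with Rhino.
public static Function compileMapFunction() {
  Context cx = Context.enter();
  try {
    cx.setLanguageVersion(Context.VERSION_1_7); // "yield" needs JS 1.7+
    Scriptable scope = cx.initStandardObjects();
    // Calling a function that contains "yield" returns a NativeGenerator,
    // which dispatchMapReduce above drains: one context.write per yield.
    String js = "function map(key, value) { yield [key, value]; }";
    cx.evaluateString(scope, js, "map.js", 1, null);
    return (Function) scope.get("map", scope);
  } finally {
    Context.exit();
  }
}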
Example 3: putBulkLoadMulti
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.TaskInputOutputContext; // import the package/class the method depends on

// BulkOutputFormat, ArrayMap3, rowkey, getStringName(), and createPut()
// are defined by the enclosing class (not shown).
/**
 * Writes the current row operation as a Put, for use with
 * HMultiFileOutputFormat.
 *
 * @param context the task context to write the Put to
 * @throws IOException
 * @throws InterruptedException
 */
public void putBulkLoadMulti(
    TaskInputOutputContext<?, ?, ImmutableBytesWritable, Put> context)
    throws IOException, InterruptedException {
  // if (partitioner == null) {
  //   partitioner = new TotalOrderPartitioner();
  //   partitioner.setConf(context.getConfiguration());
  // }
  // Prefix the rowkey with the table name so the multi-file output format
  // can route each Put to the right table's files.
  ImmutableBytesWritable key = new ImmutableBytesWritable(
      BulkOutputFormat.makeKey(getStringName(), rowkey));
  Put put = createPut();
  context.write(key, put);
  values = new ArrayMap3(); // reset buffered cell values for the next row
}
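A hypothetical call site for putBulkLoadMulti; the reducer, the MyRow class standing in for the row type these methods live on, and the "cf"/"count" column are all invented names for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical reducer: accumulate cell values on a row object, then
// flush the row as a single Put keyed for the multi-table output format.
static class RowFlushReducer
    extends Reducer<Text, Text, ImmutableBytesWritable, Put> {
  @Override
  protected void reduce(Text key, Iterable<Text> vals, Context context)
      throws IOException, InterruptedException {
    MyRow row = new MyRow(key.toString()); // wraps rowkey + buffered values
    for (Text v : vals) {
      row.set("cf", "count", v.toString()); // hypothetical setter
    }
    row.putBulkLoadMulti(context); // emits (tableName + rowkey, Put)
  }
}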
Example 4: putBulkLoadPut
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.TaskInputOutputContext; // import the package/class the method depends on

/** Writes the current row operation as a Put keyed by the bare rowkey. */
public void putBulkLoadPut(
    TaskInputOutputContext<?, ?, ImmutableBytesWritable, Put> context)
    throws IOException, InterruptedException {
  // if (partitioner == null) {
  //   partitioner = new TotalOrderPartitioner();
  //   partitioner.setConf(context.getConfiguration());
  // }
  ImmutableBytesWritable key = new ImmutableBytesWritable(rowkey);
  Put put = createPut();
  context.write(key, put);
  values = new ArrayMap3(); // reset buffered cell values for the next row
}
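Note the only difference from Example 3: putBulkLoadPut keys the output by the bare rowkey, while putBulkLoadMulti prefixes the rowkey with the table name (via BulkOutputFormat.makeKey) so a multi-table output format can route each Put to the right table. Both end with the same context.write(key, put) call and then reset the buffered values.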