This article collects typical usage examples of the Java class org.apache.hadoop.io.ObjectWritable. If you are wondering what the ObjectWritable class is for, how to use it, or what real usages look like, the curated examples below may help.
The ObjectWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default.
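Before diving into the examples, here is a minimal, self-contained sketch (purely illustrative, not one of the 15 examples below) of the basic round trip ObjectWritable provides: it serializes the declared class name together with the wrapped instance, so an arbitrary value can be written and read back without the reader knowing the concrete type in advance.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;

public class ObjectWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Wrap an arbitrary value (here a Text) in an ObjectWritable.
    ObjectWritable wrapped = new ObjectWritable(new Text("hello"));
    wrapped.setConf(conf);

    // Serialize: the declared class name is written, followed by the instance itself.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    wrapped.write(new DataOutputStream(buffer));

    // Deserialize into a fresh ObjectWritable; the Configuration is needed so the
    // wrapped class can be instantiated reflectively.
    ObjectWritable restored = new ObjectWritable();
    restored.setConf(conf);
    restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

    System.out.println(restored.get()); // prints: hello
  }
}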
Example 1: getClient
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
/**
 * Construct & cache an IPC client with the user-provided SocketFactory
 * if no cached client exists.
 *
 * @param conf Configuration
 * @return an IPC client
 */
private synchronized Client getClient(Configuration conf,
    SocketFactory factory) {
  // Construct & cache client. The configuration is only used for timeout,
  // and Clients have connection pools. So we can either (a) lose some
  // connection pooling and leak sockets, or (b) use the same timeout for all
  // configurations. Since the IPC is usually intended globally, not
  // per-job, we choose (a).
  Client client = clients.get(factory);
  if (client == null) {
    client = new Client(ObjectWritable.class, conf, factory);
    clients.put(factory, client);
  } else {
    client.incCount();
  }
  return client;
}
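Because getClient() bumps a reference count on a cache hit, a matching release method is implied but not shown. A hedged sketch of what that counterpart typically looks like follows; the method names decCount, isZeroReference, getSocketFactory and stop are assumptions inferred from the incCount() call above.

private void stopClient(Client client) {
  synchronized (this) {
    client.decCount();                      // assumed counterpart of incCount()
    if (client.isZeroReference()) {         // assumed: last reference released
      clients.remove(client.getSocketFactory());
    }
  }
  if (client.isZeroReference()) {
    client.stop();                          // shut the cached IPC client down
  }
}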
Example 2: invoke
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }
  ObjectWritable value = (ObjectWritable)
      client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
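For context, this invoke() method belongs to a java.lang.reflect.InvocationHandler. A hedged sketch of how such a handler is typically installed on an RPC protocol interface is shown below; MyProtocol and the Invoker constructor arguments are placeholders, not names confirmed by the source project.

// Hypothetical wiring: every method call on the returned proxy is routed
// through invoke() above and shipped to the server as an Invocation.
MyProtocol proxy = (MyProtocol) java.lang.reflect.Proxy.newProxyInstance(
    MyProtocol.class.getClassLoader(),
    new Class<?>[] { MyProtocol.class },
    new Invoker(remoteId, conf, factory));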
Example 3: map
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
/**
 * Wrap values in ObjectWritable.
 */
public void map(Text key, Writable value,
    OutputCollector<Text, ObjectWritable> output, Reporter reporter)
    throws IOException {
  ObjectWritable objWrite = new ObjectWritable();
  Writable cloned = null;
  if (value instanceof LinkDatum) {
    cloned = new Text(((LinkDatum) value).getUrl());
  } else {
    cloned = WritableUtils.clone(value, conf);
  }
  objWrite.set(cloned);
  output.collect(key, objWrite);
}
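On the consuming side, each wrapped value is recovered with ObjectWritable.get(). A hedged sketch of a matching reducer (old mapred API; not taken from the source project) might look like this:

public void reduce(Text key, Iterator<ObjectWritable> values,
    OutputCollector<Text, ObjectWritable> output, Reporter reporter)
    throws IOException {
  while (values.hasNext()) {
    Object obj = values.next().get(); // the original Writable wrapped in map()
    if (obj instanceof Text) {
      // handle the URL that map() extracted from a LinkDatum
    }
    // ... dispatch on the other wrapped types as needed
  }
}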
Example 4: getClient
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
/**
 * Construct & cache an IPC client with the user-provided SocketFactory
 * if no cached client exists.
 *
 * @param conf Configuration
 * @return an IPC client
 */
private synchronized Client getClient(Configuration conf,
    SocketFactory factory) {
  // Construct & cache client. The configuration is only used for timeout,
  // and Clients have connection pools. So we can either (a) lose some
  // connection pooling and leak sockets, or (b) use the same timeout for all
  // configurations. Since the IPC is usually intended globally, not
  // per-job, we choose (a).
  Client client = clients.get(factory);
  if (client == null) {
    client = new Client(ObjectWritable.class, conf, factory);
    clients.put(factory, client);
  } else {
    client.incCount();
  }
  return client;
}
Example 5: invoke
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }
  ObjectWritable value = (ObjectWritable)
      client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
Example 6: readFields
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
  fieldName = Text.readString(in);
  mapKey = new Utf8(Text.readString(in));
  filterOp = WritableUtils.readEnum(in, FilterOp.class);
  operands.clear();
  int operandsSize = WritableUtils.readVInt(in);
  for (int i = 0; i < operandsSize; i++) {
    Object operand = ObjectWritable.readObject(in, conf);
    if (operand instanceof String) {
      operand = new Utf8((String) operand);
    }
    operands.add(operand);
  }
  filterIfMissing = in.readBoolean();
}
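The symmetric write() is not part of this example. A hedged sketch that simply mirrors the field order of readFields() above (the Utf8-to-String conversion for operands is an assumption) could look like:

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, fieldName);
  Text.writeString(out, mapKey.toString());
  WritableUtils.writeEnum(out, filterOp);
  WritableUtils.writeVInt(out, operands.size());
  for (Object operand : operands) {
    if (operand instanceof Utf8) {
      operand = operand.toString(); // mirror the String -> Utf8 conversion in readFields()
    }
    ObjectWritable.writeObject(out, operand, operand.getClass(), conf);
  }
  out.writeBoolean(filterIfMissing);
}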
Example 7: reduceNativeValues
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void reduceNativeValues(
    final GeoWaveInputKey key,
    final Iterable<Object> values,
    final ReduceContext<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, Object> context )
    throws IOException,
    InterruptedException {
  final GridCoverage mergedCoverage = helper.getMergedCoverage(
      key,
      values);
  if (mergedCoverage != null) {
    context.write(
        key,
        mergedCoverage);
  }
}
Example 8: reduceNativeValues
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void reduceNativeValues(
    final GeoWaveInputKey key,
    final Iterable<Object> values,
    final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveOutputKey, GridCoverage>.Context context )
    throws IOException,
    InterruptedException {
  final GridCoverage mergedCoverage = helper.getMergedCoverage(
      key,
      values);
  if (mergedCoverage != null) {
    context.write(
        helper.getGeoWaveOutputKey(),
        mergedCoverage);
  }
}
Example 9: mapWritableValue
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void mapWritableValue(
    final GeoWaveInputKey key,
    final ObjectWritable value,
    final Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
    throws IOException,
    InterruptedException {
  // Cached for efficiency since the output is the input object;
  // the de-serialized input object is only used for sampling.
  // For simplicity, allow the de-serialization to occur in all cases,
  // even though some sampling functions do not inspect the input object.
  currentValue = value;
  super.mapWritableValue(
      key,
      value,
      context);
}
Example 10: mapNativeValue
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void mapNativeValue(
    final GeoWaveInputKey key,
    final Object value,
    final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
    throws IOException,
    InterruptedException {
  @SuppressWarnings("unchecked")
  final AnalyticItemWrapper<T> wrapper = itemWrapperFactory.create((T) value);
  outputKey.setAdapterId(key.getAdapterId());
  outputKey.setDataId(new ByteArrayId(
      StringUtils.stringToBinary(nestedGroupCentroidAssigner.getGroupForLevel(wrapper))));
  outputKey.setInsertionId(key.getInsertionId());
  context.write(
      outputKey,
      currentValue);
}
Example 11: setup
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void setup(
    final Reducer<GeoWaveInputKey, ObjectWritable, GeoWaveOutputKey, Object>.Context context )
    throws IOException,
    InterruptedException {
  super.setup(context);
  final ScopedJobConfiguration config = new ScopedJobConfiguration(
      context.getConfiguration(),
      InputToOutputKeyReducer.class,
      LOGGER);
  final ByteArrayId indexId = new ByteArrayId(
      config.getString(
          OutputParameters.Output.INDEX_ID,
          "na"));
  final List<ByteArrayId> indexIds = new ArrayList<ByteArrayId>();
  indexIds.add(indexId);
  outputKey = new GeoWaveOutputKey(
      new ByteArrayId(
          "na"),
      indexIds);
}
Example 12: reduceNativeValues
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void reduceNativeValues(
    final GeoWaveInputKey key,
    final Iterable<Object> values,
    final ReduceContext<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, Object> context )
    throws IOException,
    InterruptedException {
  final Iterator<Object> valIt = values.iterator();
  if (valIt.hasNext()) {
    key.setAdapterId(outputAdapter.getAdapterId());
    final SimpleFeature feature = getSimpleFeature(
        key,
        valIt.next());
    context.write(
        key,
        feature);
  }
}
Example 13: configure
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
public void configure(
    final Job job )
    throws Exception {
  job.setMapperClass(Mapper.class);
  job.setReducerClass(InputToOutputKeyReducer.class);
  job.setMapOutputKeyClass(GeoWaveInputKey.class);
  job.setMapOutputValueClass(ObjectWritable.class);
  job.setOutputKeyClass(GeoWaveOutputKey.class);
  job.setOutputValueClass(Object.class);
  job.setSpeculativeExecution(false);
  job.setJobName("GeoWave Input to Output");
  job.setReduceSpeculativeExecution(false);
}
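Note that ObjectWritable is registered here as the map output value class, which lets the shuffle carry heterogeneous values. A hedged driver sketch showing how a configure(Job) method like this one is typically applied follows; the runner variable stands in for an instance of the class declaring configure() and is an assumption for illustration.

Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
runner.configure(job);              // installs the mapper/reducer and key/value classes shown above
boolean success = job.waitForCompletion(true);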
Example 14: mapNativeValue
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void mapNativeValue(
    final GeoWaveInputKey key,
    final Object value,
    final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GeoWaveInputKey, ObjectWritable>.Context context )
    throws IOException,
    InterruptedException {
  @SuppressWarnings("unchecked")
  final double rank = samplingFunction.rank(
      sampleSize,
      (T) value);
  if (rank > 0.0000000001) {
    final AnalyticItemWrapper<Object> wrapper = itemWrapperFactory.create(value);
    outputKey.setDataId(new ByteArrayId(
        keyManager.putData(
            nestedGroupCentroidAssigner.getGroupForLevel(wrapper),
            1.0 - rank, // sorts in ascending order
            key.getDataId().getBytes())));
    outputKey.setAdapterId(key.getAdapterId());
    outputKey.setInsertionId(key.getInsertionId());
    context.write(
        outputKey,
        currentValue);
  }
}
Example 15: mapNativeValue
import org.apache.hadoop.io.ObjectWritable; // import the required package/class
@Override
protected void mapNativeValue(
    final GeoWaveInputKey key,
    final Object value,
    final org.apache.hadoop.mapreduce.Mapper<GeoWaveInputKey, ObjectWritable, GroupIDText, BytesWritable>.Context context )
    throws IOException,
    InterruptedException {
  final AnalyticItemWrapper<Object> item = itemWrapperFactory.create(value);
  nestedGroupCentroidAssigner.findCentroidForLevel(
      item,
      centroidAssociationFn);
  final byte[] outData = association.toBinary();
  outputValWritable.set(
      outData,
      0,
      outData.length);
  context.write(
      outputKeyWritable,
      outputValWritable);
}