本文整理汇总了Java中org.apache.hadoop.io.serializer.Deserializer.close方法的典型用法代码示例。如果您正苦于以下问题：Java Deserializer.close方法的具体用法？Java Deserializer.close怎么用？Java Deserializer.close使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.io.serializer.Deserializer
的用法示例。
在下文中一共展示了Deserializer.close方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: serDeser
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Round-trips {@code conf} through Hadoop's serialization framework and
 * returns the reconstructed copy, exercising serializer/deserializer symmetry.
 *
 * @param conf the object to serialize and then deserialize
 * @return a new instance rebuilt from the serialized bytes
 * @throws Exception if serialization or deserialization fails
 */
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  try {
    serializer.open(out);
    serializer.serialize(conf);
  } finally {
    // Release the serializer even if serialize() throws.
    serializer.close();
  }

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  try {
    deserializer.open(in);
    return deserializer.deserialize(null);
  } finally {
    // Release the deserializer even if deserialize() throws.
    deserializer.close();
  }
}
示例2: makeCopyForPassByValue
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Returns a deep copy of {@code obj} made by serializing it and
 * deserializing the bytes into a freshly constructed instance, so a value
 * can be passed between chained tasks without sharing mutable state.
 *
 * @param serialization the Hadoop serialization used for the round trip
 * @param obj the value to copy
 * @return a copy with the same serialized content as {@code obj}
 * @throws IOException if serialization or deserialization fails
 */
private <E> E makeCopyForPassByValue(Serialization<E> serialization,
                                     E obj) throws IOException {
  Serializer<E> ser =
      serialization.getSerializer(GenericsUtil.getClass(obj));
  Deserializer<E> deser =
      serialization.getDeserializer(GenericsUtil.getClass(obj));

  DataOutputBuffer dof = threadLocalDataOutputBuffer.get();
  dof.reset();
  try {
    ser.open(dof);
    ser.serialize(obj);
  } finally {
    // Close even if serialize() throws.
    ser.close();
  }

  obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
                                    getChainJobConf());
  ByteArrayInputStream bais =
      new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
  try {
    deser.open(bais);
    // Capture the returned value: some serializations (e.g. Java
    // serialization) return a new instance instead of filling in the
    // one passed to deserialize(), which the original code discarded.
    obj = deser.deserialize(obj);
  } finally {
    deser.close();
  }
  return obj;
}
示例3: deserialize
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/** Deserializes the object in the given datainput using
 * available Hadoop serializations.
 *
 * @param conf Hadoop configuration; may be null, in which case a default
 *             configuration is created.
 * @param in data input positioned at a vint length prefix followed by the
 *           serialized payload.
 * @param obj instance to reuse when the serialization supports it, or null.
 * @param objClass concrete class of the serialized object.
 * @return the deserialized object.
 * @throws IOException if reading fails or no registered serialization
 *                     accepts {@code objClass}. */
public static<T> T deserialize(Configuration conf, DataInput in
    , T obj , Class<T> objClass) throws IOException {
  SerializationFactory serializationFactory = new SerializationFactory(getOrCreateConf(conf));
  Deserializer<T> deserializer = serializationFactory.getDeserializer(
      objClass);
  if (deserializer == null) {
    // Fail with a clear message instead of a NullPointerException at open().
    throw new IOException("No Hadoop deserializer found for " + objClass);
  }
  // Payload is length-prefixed with a vint.
  int length = WritableUtils.readVInt(in);
  byte[] arr = new byte[length];
  in.readFully(arr);
  List<ByteBuffer> list = new ArrayList<>();
  list.add(ByteBuffer.wrap(arr));
  try (ByteBufferInputStream is = new ByteBufferInputStream(list)) {
    deserializer.open(is);
    return deserializer.deserialize(obj);
  } finally {
    deserializer.close();
  }
}
示例4: cloneObj
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Creates a deep copy of {@code t} by round-tripping it through the
 * configured Hadoop serialization for its runtime class.
 *
 * <p>Uses an in-memory byte array instead of piped streams: a
 * {@code PipedInputStream} has a small fixed internal buffer and, with both
 * pipe ends driven from the same thread as here, serializing anything larger
 * than that buffer blocks the writer forever (single-thread deadlock).
 *
 * @param t the object to clone
 * @return a new instance with the same serialized content as {@code t}
 * @throws IOException if serialization or deserialization fails
 */
@SuppressWarnings("unchecked")
private <T> T cloneObj(T t) throws IOException
{
  Class<T> keyClass = (Class<T>) t.getClass();

  java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
  Serializer<T> keySerializer = serializationFactory.getSerializer(keyClass);
  try {
    keySerializer.open(buffer);
    keySerializer.serialize(t);
  } finally {
    keySerializer.close();
  }

  java.io.ByteArrayInputStream input =
      new java.io.ByteArrayInputStream(buffer.toByteArray());
  Deserializer<T> keyDeserializer = serializationFactory.getDeserializer(keyClass);
  try {
    keyDeserializer.open(input);
    return keyDeserializer.deserialize(null);
  } finally {
    keyDeserializer.close();
  }
}
示例5: testSerializer
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Verifies that a WebPage serialized with JBossMarshallerSerialization
 * deserializes back to an equal object (round-trip fidelity).
 */
@Test
public void testSerializer() throws Exception {
  WebPage original = new WebPage(new URL("http://www.jboss.org"), "opensource", 10L);
  JBossMarshallerSerialization<WebPage> serialization = new JBossMarshallerSerialization<>();

  // Serialize the page into an in-memory buffer.
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  Serializer<WebPage> serializer = serialization.getSerializer(WebPage.class);
  serializer.open(buffer);
  serializer.serialize(original);
  serializer.close();

  // Read the bytes back and check round-trip equality.
  Deserializer<WebPage> deserializer = serialization.getDeserializer(WebPage.class);
  deserializer.open(new ByteArrayInputStream(buffer.toByteArray()));
  WebPage roundTripped = deserializer.deserialize(null);
  deserializer.close();
  assertEquals(roundTripped, original);
}
示例6: deserialize
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Deserializes the object in the given data input using
 * available Hadoop serializations.
 *
 * @param conf Hadoop conf; may be null, in which case a default is created.
 * @param in data input stream where serialized content is read, positioned
 *           at a vint length prefix followed by the payload.
 * @param <T> object class type.
 * @param obj data object to reuse when the serialization supports it.
 * @param objClass object class type.
 * @throws IOException occurred while deserializing the byte content, or if
 *                     no registered serialization accepts {@code objClass}.
 * @return deserialized object.
 */
public static<T> T deserialize(Configuration conf, DataInput in
    , T obj , Class<T> objClass) throws IOException {
  SerializationFactory serializationFactory = new SerializationFactory(getOrCreateConf(conf));
  Deserializer<T> deserializer = serializationFactory.getDeserializer(
      objClass);
  if (deserializer == null) {
    // Fail with a clear message instead of a NullPointerException at open().
    throw new IOException("No Hadoop deserializer found for " + objClass);
  }
  // Payload is length-prefixed with a vint.
  int length = WritableUtils.readVInt(in);
  byte[] arr = new byte[length];
  in.readFully(arr);
  List<ByteBuffer> list = new ArrayList<>();
  list.add(ByteBuffer.wrap(arr));
  try (ByteBufferInputStream is = new ByteBufferInputStream(list)) {
    deserializer.open(is);
    return deserializer.deserialize(obj);
  } finally {
    deserializer.close();
  }
}
示例7: createOldFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
public static InputSplit createOldFormatSplitFromUserPayload(
MRSplitProto splitProto, SerializationFactory serializationFactory)
throws IOException {
// This may not need to use serialization factory, since OldFormat
// always uses Writable to write splits.
Preconditions.checkNotNull(splitProto, "splitProto cannot be null");
String className = splitProto.getSplitClassName();
Class<InputSplit> clazz;
try {
clazz = (Class<InputSplit>) Class.forName(className);
} catch (ClassNotFoundException e) {
throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
}
Deserializer<InputSplit> deserializer = serializationFactory
.getDeserializer(clazz);
deserializer.open(splitProto.getSplitBytes().newInput());
InputSplit inputSplit = deserializer.deserialize(null);
deserializer.close();
return inputSplit;
}
示例8: createNewFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
public static org.apache.hadoop.mapreduce.InputSplit createNewFormatSplitFromUserPayload(
MRSplitProto splitProto, SerializationFactory serializationFactory)
throws IOException {
Preconditions.checkNotNull(splitProto, "splitProto must be specified");
String className = splitProto.getSplitClassName();
Class<org.apache.hadoop.mapreduce.InputSplit> clazz;
try {
clazz = (Class<org.apache.hadoop.mapreduce.InputSplit>) Class
.forName(className);
} catch (ClassNotFoundException e) {
throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
}
Deserializer<org.apache.hadoop.mapreduce.InputSplit> deserializer = serializationFactory
.getDeserializer(clazz);
deserializer.open(splitProto.getSplitBytes().newInput());
org.apache.hadoop.mapreduce.InputSplit inputSplit = deserializer
.deserialize(null);
deserializer.close();
return inputSplit;
}
示例9: createOldFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Create an instance of {@link org.apache.hadoop.mapred.InputSplit} from the {@link
 * org.apache.tez.mapreduce.input.MRInput} representation of a split.
 *
 * @param splitProto The {@link org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRSplitProto}
 *                   instance representing the split
 * @param serializationFactory the serialization mechanism used to write out the split
 * @return an instance of the split
 * @throws java.io.IOException if the split class cannot be loaded or the
 *                             bytes cannot be deserialized
 */
@SuppressWarnings("unchecked")
@InterfaceStability.Evolving
@InterfaceAudience.LimitedPrivate({"hive, pig"})
public static InputSplit createOldFormatSplitFromUserPayload(
    MRRuntimeProtos.MRSplitProto splitProto, SerializationFactory serializationFactory)
    throws IOException {
  // This may not need to use serialization factory, since OldFormat
  // always uses Writable to write splits.
  Preconditions.checkNotNull(splitProto, "splitProto cannot be null");
  String className = splitProto.getSplitClassName();
  Class<InputSplit> clazz;
  try {
    clazz = (Class<InputSplit>) Class.forName(className);
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
  }
  Deserializer<InputSplit> deserializer = serializationFactory
      .getDeserializer(clazz);
  deserializer.open(splitProto.getSplitBytes().newInput());
  try {
    return deserializer.deserialize(null);
  } finally {
    // Release the deserializer even when deserialize() throws.
    deserializer.close();
  }
}
示例10: createNewFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Create an instance of {@link org.apache.hadoop.mapreduce.InputSplit} from the {@link
 * org.apache.tez.mapreduce.input.MRInput} representation of a split.
 *
 * @param splitProto The {@link org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRSplitProto}
 *                   instance representing the split
 * @param serializationFactory the serialization mechanism used to write out the split
 * @return an instance of the split
 * @throws IOException if the split class cannot be loaded or the bytes
 *                     cannot be deserialized
 */
@InterfaceStability.Evolving
@SuppressWarnings("unchecked")
public static org.apache.hadoop.mapreduce.InputSplit createNewFormatSplitFromUserPayload(
    MRRuntimeProtos.MRSplitProto splitProto, SerializationFactory serializationFactory)
    throws IOException {
  Preconditions.checkNotNull(splitProto, "splitProto must be specified");
  String className = splitProto.getSplitClassName();
  Class<org.apache.hadoop.mapreduce.InputSplit> clazz;
  try {
    clazz = (Class<org.apache.hadoop.mapreduce.InputSplit>) Class
        .forName(className);
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
  }
  Deserializer<org.apache.hadoop.mapreduce.InputSplit> deserializer = serializationFactory
      .getDeserializer(clazz);
  deserializer.open(splitProto.getSplitBytes().newInput());
  try {
    return deserializer.deserialize(null);
  } finally {
    // Release the deserializer even when deserialize() throws.
    deserializer.close();
  }
}
示例11: close
import org.apache.hadoop.io.serializer.Deserializer; //导入方法依赖的package包/类
/**
 * Closes every deserializer this reader used, then the underlying input
 * stream. The stream is closed even if a deserializer's close() throws,
 * so the file descriptor cannot leak.
 *
 * @throws IOException if closing a deserializer or the stream fails
 */
@Override
public void close() throws IOException {
  try {
    for (Deserializer deserializer : deserializersUsed) {
      if (deserializer != null) {
        deserializer.close();
      }
    }
  } finally {
    // Always release the underlying stream, even when a deserializer
    // close above threw.
    dataIn.close();
  }
}