This page collects typical usage examples of the Java method org.apache.hadoop.io.serializer.SerializationFactory.getDeserializer. If you are unsure what SerializationFactory.getDeserializer does, how to use it, or want to see concrete usages, the curated method samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.serializer.SerializationFactory.
The following presents 15 code examples of SerializationFactory.getDeserializer, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
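Before the examples, here is a minimal, self-contained round-trip sketch of getDeserializer (all names in this snippet are illustrative and not taken from any of the projects below): serialize a value into an in-memory buffer with a Serializer, then read it back with the matching Deserializer.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
public class GetDeserializerDemo {
public static void main(String[] args) throws IOException {
Configuration conf = new Configuration();
SerializationFactory factory = new SerializationFactory(conf);
// Serialize a Text value into an in-memory buffer.
Serializer<Text> serializer = factory.getSerializer(Text.class);
DataOutputBuffer out = new DataOutputBuffer();
serializer.open(out);
serializer.serialize(new Text("hello"));
serializer.close();
// Read it back with the matching Deserializer.
Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
deserializer.open(in);
Text copy = deserializer.deserialize(null); // null => allocate a new instance
deserializer.close();
System.out.println(copy); // prints "hello"
}
}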
Example 1: copy
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
* Make a copy of the writable object using serialization to a buffer
* @param src the object to copy from
* @param dst the object to copy into, which is destroyed
* @return dst param (the copy)
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf,
T src, T dst) throws IOException {
CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get();
buffer.outBuffer.reset();
SerializationFactory factory = getFactory(conf);
Class<T> cls = (Class<T>) src.getClass();
Serializer<T> serializer = factory.getSerializer(cls);
serializer.open(buffer.outBuffer);
serializer.serialize(src);
buffer.moveData();
Deserializer<T> deserializer = factory.getDeserializer(cls);
deserializer.open(buffer.inBuffer);
dst = deserializer.deserialize(dst);
return dst;
}
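A hedged usage sketch of this helper (the call site below is hypothetical; in Hadoop itself an overload with this signature lives in a utility class such as ReflectionUtils):
// Hypothetical call site: clone a Writable without sharing state.
Text original = new Text("payload");
Text clone = copy(conf, original, new Text()); // dst is overwritten and returned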
Example 2: copy
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
* Make a copy of the writable object using serialization to a buffer
* @param src the object to copy from
* @param dst the object to copy into, which is destroyed
* @return dst param (the copy)
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf,
T src, T dst) throws IOException {
CopyInCopyOutBuffer buffer = cloneBuffers.get();
buffer.outBuffer.reset();
SerializationFactory factory = getFactory(conf);
Class<T> cls = (Class<T>) src.getClass();
Serializer<T> serializer = factory.getSerializer(cls);
serializer.open(buffer.outBuffer);
serializer.serialize(src);
buffer.moveData();
Deserializer<T> deserializer = factory.getDeserializer(cls);
deserializer.open(buffer.inBuffer);
dst = deserializer.deserialize(dst);
return dst;
}
Example 3: serDeser
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
private <K> K serDeser(K conf) throws Exception {
SerializationFactory factory = new SerializationFactory(CONF);
Serializer<K> serializer =
factory.getSerializer(GenericsUtil.getClass(conf));
Deserializer<K> deserializer =
factory.getDeserializer(GenericsUtil.getClass(conf));
DataOutputBuffer out = new DataOutputBuffer();
serializer.open(out);
serializer.serialize(conf);
serializer.close();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
deserializer.open(in);
K after = deserializer.deserialize(null);
deserializer.close();
return after;
}
Example 4: ValuesIterator
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public ValuesIterator (RawKeyValueIterator in,
RawComparator<KEY> comparator,
Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf,
Progressable reporter)
throws IOException {
this.in = in;
this.comparator = comparator;
this.reporter = reporter;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(keyIn);
this.valDeserializer = serializationFactory.getDeserializer(valClass);
this.valDeserializer.open(this.valueIn);
readNextKey();
key = nextKey;
nextKey = null; // force new instance creation
hasNext = more;
}
Example 5: copy
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
* Make a copy of the writable object using serialization to a buffer
* @param src the object to copy from
* @param dst the object to copy into, which is destroyed
* @return dst param (the copy)
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf,
T src, T dst) throws IOException {
CopyInCopyOutBuffer buffer = cloneBuffers.get();
buffer.outBuffer.reset();
SerializationFactory factory = getFactory(conf);
Class<T> cls = (Class<T>) src.getClass();
Serializer<T> serializer = factory.getSerializer(cls);
serializer.open(buffer.outBuffer);
serializer.serialize(src);
buffer.moveData();
Deserializer<T> deserializer = factory.getDeserializer(cls);
deserializer.open(buffer.inBuffer);
dst = deserializer.deserialize(dst);
return dst;
}
Example 6: deserialize
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/** Deserializes the object from the given data input using
* available Hadoop serializations.
* @throws IOException */
public static <T> T deserialize(Configuration conf, DataInput in,
T obj, Class<T> objClass) throws IOException {
SerializationFactory serializationFactory = new SerializationFactory(getOrCreateConf(conf));
Deserializer<T> deserializer = serializationFactory.getDeserializer(
objClass);
int length = WritableUtils.readVInt(in);
byte[] arr = new byte[length];
in.readFully(arr);
List<ByteBuffer> list = new ArrayList<>();
list.add(ByteBuffer.wrap(arr));
try (ByteBufferInputStream is = new ByteBufferInputStream(list)) {
deserializer.open(is);
T newObj = deserializer.deserialize(obj);
return newObj;
} finally {
if (deserializer != null)
deserializer.close();
}
}
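This page shows only the read side; a plausible write-side counterpart (an assumption modeled on the framing read above, not code from the same project) would serialize into a buffer, then emit a vint length prefix followed by the raw bytes:
public static <T> void serialize(Configuration conf, DataOutput out,
T obj, Class<T> objClass) throws IOException {
SerializationFactory serializationFactory = new SerializationFactory(conf);
Serializer<T> serializer = serializationFactory.getSerializer(objClass);
DataOutputBuffer buffer = new DataOutputBuffer();
serializer.open(buffer);
serializer.serialize(obj);
serializer.close();
WritableUtils.writeVInt(out, buffer.getLength()); // vint length prefix
out.write(buffer.getData(), 0, buffer.getLength()); // serialized payload
}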
Example 7: ReduceContext
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public ReduceContext(Configuration conf, TaskAttemptID taskid,
RawKeyValueIterator input,
Counter inputKeyCounter,
Counter inputValueCounter,
RecordWriter<KEYOUT,VALUEOUT> output,
OutputCommitter committer,
StatusReporter reporter,
RawComparator<KEYIN> comparator,
Class<KEYIN> keyClass,
Class<VALUEIN> valueClass
) throws InterruptedException, IOException{
super(conf, taskid, output, committer, reporter);
this.input = input;
this.inputKeyCounter = inputKeyCounter;
this.inputValueCounter = inputValueCounter;
this.comparator = comparator;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(buffer);
this.valueDeserializer = serializationFactory.getDeserializer(valueClass);
this.valueDeserializer.open(buffer);
hasMore = input.next();
}
Example 8: getSplitDetails
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset, Configuration configuration)
throws IOException {
inFile.seek(offset);
String className = StringInterner.weakIntern(Text.readString(inFile));
Class<T> cls;
try {
cls = (Class<T>) configuration.getClassByName(className);
} catch (ClassNotFoundException ce) {
IOException wrap = new IOException("Split class " + className +
" not found");
wrap.initCause(ce);
throw wrap;
}
SerializationFactory factory = new SerializationFactory(configuration);
Deserializer<T> deserializer =
(Deserializer<T>) factory.getDeserializer(cls);
deserializer.open(inFile);
T split = deserializer.deserialize(null);
return split;
}
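For context, the record being read here is a Text-encoded class name followed by the split's serialized bytes. A hypothetical writer producing that layout might look like the sketch below (modeled on the read path above, not taken from Hadoop's actual split-writing code):
public static <T> void writeSplitDetails(FSDataOutputStream out, T split,
Configuration conf) throws IOException {
Text.writeString(out, split.getClass().getName()); // class-name header
SerializationFactory factory = new SerializationFactory(conf);
@SuppressWarnings("unchecked")
Serializer<T> serializer = factory.getSerializer((Class<T>) split.getClass());
serializer.open(out);
serializer.serialize(split); // serialized split bytes follow the header
}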
Example 9: BlockIterator
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public BlockIterator(Configuration conf,
InputStream inStream,
Class<V> valueClass,
BlockSerializationType serializationType,
BlockSchema schema) throws IOException,
InstantiationException,
IllegalAccessException
{
switch (serializationType)
{
case DEFAULT:
SerializationFactory serializationFactory = new SerializationFactory(conf);
deserializer = serializationFactory.getDeserializer(valueClass);
break;
case COMPACT:
deserializer = new CompactDeserializer<V>(schema);
break;
default:
// Fail fast on an unknown serialization type instead of hitting an
// NPE at deserializer.open() below.
throw new IOException("Unknown serialization type: " + serializationType);
}
deserializer.open(inStream);
}
Example 10: getSplitDetails
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings({ "unchecked", "unused" })
private <T> T getSplitDetails(Path file, long offset) throws IOException {
FileSystem fs = file.getFileSystem(getYarnConf());
FSDataInputStream inFile = null;
T split = null;
try {
inFile = fs.open(file);
inFile.seek(offset);
String className = Text.readString(inFile);
Class<T> cls;
try {
cls = (Class<T>) getYarnConf().getClassByName(className);
} catch (ClassNotFoundException ce) {
IOException wrap = new IOException(String.format("Split class %s not found", className));
wrap.initCause(ce);
throw wrap;
}
SerializationFactory factory = new SerializationFactory(getYarnConf());
Deserializer<T> deserializer = (Deserializer<T>) factory.getDeserializer(cls);
deserializer.open(inFile);
split = deserializer.deserialize(null);
} finally {
IOUtils.closeStream(inFile);
}
return split;
}
Example 11: deserialize
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
* Deserializes the object in the given data input using
* available Hadoop serializations.
*
* @param conf Hadoop conf.
* @param in data input stream where serialized content is read.
* @param <T> object class type.
* @param obj data object.
* @param objClass object class type.
* @throws IOException occurred while deserializing the byte content.
* @return deserialized object.
*/
public static <T> T deserialize(Configuration conf, DataInput in,
T obj, Class<T> objClass) throws IOException {
SerializationFactory serializationFactory = new SerializationFactory(getOrCreateConf(conf));
Deserializer<T> deserializer = serializationFactory.getDeserializer(
objClass);
int length = WritableUtils.readVInt(in);
byte[] arr = new byte[length];
in.readFully(arr);
List<ByteBuffer> list = new ArrayList<>();
list.add(ByteBuffer.wrap(arr));
try (ByteBufferInputStream is = new ByteBufferInputStream(list)) {
deserializer.open(is);
T newObj = deserializer.deserialize(obj);
return newObj;
} finally {
if (deserializer != null)
deserializer.close();
}
}
Example 12: ValuesIterator
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public ValuesIterator (TezRawKeyValueIterator in,
RawComparator<KEY> comparator,
Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf,
TezCounter inputKeyCounter,
TezCounter inputValueCounter)
throws IOException {
this.in = in;
this.comparator = comparator;
this.inputKeyCounter = inputKeyCounter;
this.inputValueCounter = inputValueCounter;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(keyIn);
this.valDeserializer = serializationFactory.getDeserializer(valClass);
this.valDeserializer.open(this.valueIn);
}
Example 13: createOldFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public static InputSplit createOldFormatSplitFromUserPayload(
MRSplitProto splitProto, SerializationFactory serializationFactory)
throws IOException {
// This may not need to use serialization factory, since OldFormat
// always uses Writable to write splits.
Preconditions.checkNotNull(splitProto, "splitProto cannot be null");
String className = splitProto.getSplitClassName();
Class<InputSplit> clazz;
try {
clazz = (Class<InputSplit>) Class.forName(className);
} catch (ClassNotFoundException e) {
throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
}
Deserializer<InputSplit> deserializer = serializationFactory
.getDeserializer(clazz);
deserializer.open(splitProto.getSplitBytes().newInput());
InputSplit inputSplit = deserializer.deserialize(null);
deserializer.close();
return inputSplit;
}
Example 14: createNewFormatSplitFromUserPayload
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public static org.apache.hadoop.mapreduce.InputSplit createNewFormatSplitFromUserPayload(
MRSplitProto splitProto, SerializationFactory serializationFactory)
throws IOException {
Preconditions.checkNotNull(splitProto, "splitProto must be specified");
String className = splitProto.getSplitClassName();
Class<org.apache.hadoop.mapreduce.InputSplit> clazz;
try {
clazz = (Class<org.apache.hadoop.mapreduce.InputSplit>) Class
.forName(className);
} catch (ClassNotFoundException e) {
throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
}
Deserializer<org.apache.hadoop.mapreduce.InputSplit> deserializer = serializationFactory
.getDeserializer(clazz);
deserializer.open(splitProto.getSplitBytes().newInput());
org.apache.hadoop.mapreduce.InputSplit inputSplit = deserializer
.deserialize(null);
deserializer.close();
return inputSplit;
}
Example 15: DefaultStringifier
import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public DefaultStringifier(Configuration conf, Class<T> c) {
SerializationFactory factory = new SerializationFactory(conf);
this.serializer = factory.getSerializer(c);
this.deserializer = factory.getDeserializer(c);
this.inBuf = new DataInputBuffer();
this.outBuf = new DataOutputBuffer();
try {
serializer.open(outBuf);
deserializer.open(inBuf);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
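As a usage note: this constructor belongs to Hadoop's org.apache.hadoop.io.DefaultStringifier, whose static store/load helpers build on it to round-trip an object through a Configuration entry (the key name below is illustrative):
Configuration conf = new Configuration();
// Serialize the object and stash it under a configuration key.
DefaultStringifier.store(conf, new Text("value"), "my.example.key");
// Later, recover an equal object from the same key.
Text restored = DefaultStringifier.load(conf, "my.example.key", Text.class);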