

Java Deserializer.deserialize Method Code Examples

This article collects representative usage examples of the Java method org.apache.hadoop.io.serializer.Deserializer.deserialize. If you are unsure what Deserializer.deserialize does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.io.serializer.Deserializer.


The following presents 15 code examples of Deserializer.deserialize, drawn from open-source projects and sorted by popularity.
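Before the examples, here is a minimal round-trip sketch of the open/serialize/deserialize/close call sequence that all of them follow. It assumes the default WritableSerialization is enabled and uses org.apache.hadoop.io.Text as the payload type; both choices are illustrative and not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class RoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);

    // Serialize a Text value into an in-memory buffer.
    Serializer<Text> serializer = factory.getSerializer(Text.class);
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(new Text("hello"));
    serializer.close();

    // Deserialize it back; passing null asks the framework for a fresh instance.
    Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    Text copy = deserializer.deserialize(null);
    deserializer.close();

    System.out.println(copy);  // prints: hello
  }
}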

Example 1: copy

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: nucypher; Project: hadoop-oss; Lines: 24; Source: ReflectionUtils.java
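A usage note: because copy returns its dst argument, callers typically reassign it. The snippet below is a hypothetical caller (the Text payload and class name are illustrative, not from the source project):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ReflectionUtils;

public class CopyUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Text src = new Text("payload");
    Text dst = new Text();  // existing contents are destroyed by the copy
    dst = ReflectionUtils.copy(conf, src, dst);
    System.out.println(dst);  // prints: payload
  }
}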

Example 2: copy

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: spafka; Project: spark_deep; Lines: 23; Source: ReflectionUtils.java

Example 3: serDeser

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
    factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
    factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
 
Developer: naver; Project: hadoop; Lines: 20; Source: TestWritableJobConf.java
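Note the deserialize(null) call: under the Deserializer contract, a null argument asks the deserializer to allocate and return a fresh object, while a non-null argument may be reused to hold the deserialized state, as examples 1 and 2 do with dst.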

Example 4: makeCopyForPassByValue

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
private <E> E makeCopyForPassByValue(Serialization<E> serialization,
                                      E obj) throws IOException {
  Serializer<E> ser =
    serialization.getSerializer(GenericsUtil.getClass(obj));
  Deserializer<E> deser =
    serialization.getDeserializer(GenericsUtil.getClass(obj));

  DataOutputBuffer dof = threadLocalDataOutputBuffer.get();

  dof.reset();
  ser.open(dof);
  ser.serialize(obj);
  ser.close();
  obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
                                    getChainJobConf());
  ByteArrayInputStream bais =
    new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
  deser.open(bais);
  deser.deserialize(obj);
  deser.close();
  return obj;
}
 
Developer: naver; Project: hadoop; Lines: 23; Source: Chain.java

Example 5: copy

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: naver; Project: hadoop; Lines: 24; Source: ReflectionUtils.java

Example 6: deserialize

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
/** Deserializes the object from the given DataInput using
 * available Hadoop serializations.
 * @throws IOException */
public static <T> T deserialize(Configuration conf, DataInput in,
    T obj, Class<T> objClass) throws IOException {
  SerializationFactory serializationFactory = new SerializationFactory(getOrCreateConf(conf));
  Deserializer<T> deserializer = serializationFactory.getDeserializer(
      objClass);

  int length = WritableUtils.readVInt(in);
  byte[] arr = new byte[length];
  in.readFully(arr);
  List<ByteBuffer> list = new ArrayList<>();
  list.add(ByteBuffer.wrap(arr));

  try (ByteBufferInputStream is = new ByteBufferInputStream(list)) {
    deserializer.open(is);
    return deserializer.deserialize(obj);
  } finally {
    // The deserializer cannot be null here, so close it unconditionally.
    deserializer.close();
  }
}
 
Developer: jianglibo; Project: gora-boot; Lines: 26; Source: IOUtils.java

Example 7: cloneObj

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
private <T> T cloneObj(T t) throws IOException
{
  Serializer<T> keySerializer;
  Class<T> keyClass;
  PipedInputStream pis = new PipedInputStream();
  PipedOutputStream pos = new PipedOutputStream(pis);
  keyClass = (Class<T>)t.getClass();
  keySerializer = serializationFactory.getSerializer(keyClass);
  keySerializer.open(pos);
  keySerializer.serialize(t);
  Deserializer<T> keyDeserializer = serializationFactory.getDeserializer(keyClass);
  keyDeserializer.open(pis);
  T clonedArg0 = keyDeserializer.deserialize(null);
  pos.close();
  pis.close();
  keySerializer.close();
  keyDeserializer.close();
  return clonedArg0;
}
 
Developer: apache; Project: apex-malhar; Lines: 21; Source: OutputCollectorImpl.java
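A design note on this example: the PipedOutputStream and PipedInputStream are written and read in the same thread, which only works while the serialized form fits in the pipe's internal buffer (1024 bytes by default); a larger object would block the write and deadlock. The buffer-based approach of examples 1, 3, and 5 avoids that limitation.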

Example 8: getSplitDetails

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset, Configuration configuration)
        throws IOException {
    inFile.seek(offset);
    String className = StringInterner.weakIntern(Text.readString(inFile));
    Class<T> cls;
    try {
        cls = (Class<T>) configuration.getClassByName(className);
    } catch (ClassNotFoundException ce) {
        IOException wrap = new IOException("Split class " + className +
                " not found");
        wrap.initCause(ce);
        throw wrap;
    }
    SerializationFactory factory = new SerializationFactory(configuration);
    Deserializer<T> deserializer =
            (Deserializer<T>) factory.getDeserializer(cls);
    deserializer.open(inFile);
    T split = deserializer.deserialize(null);
    return split;
}
 
Developer: scaleoutsoftware; Project: hServer; Lines: 22; Source: SubmittedJob.java

Example 9: createOldFormatSplitFromUserPayload

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public static InputSplit createOldFormatSplitFromUserPayload(
    MRSplitProto splitProto, SerializationFactory serializationFactory)
    throws IOException {
  // This may not need to use serialization factory, since OldFormat
  // always uses Writable to write splits.
  Preconditions.checkNotNull(splitProto, "splitProto cannot be null");
  String className = splitProto.getSplitClassName();
  Class<InputSplit> clazz;

  try {
    clazz = (Class<InputSplit>) Class.forName(className);
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
  }

  Deserializer<InputSplit> deserializer = serializationFactory
      .getDeserializer(clazz);
  deserializer.open(splitProto.getSplitBytes().newInput());
  InputSplit inputSplit = deserializer.deserialize(null);
  deserializer.close();
  return inputSplit;
}
 
Developer: apache; Project: incubator-tez; Lines: 24; Source: MRHelpers.java

Example 10: createOldFormatSplitFromUserPayload

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
/**
 * Create an instance of {@link org.apache.hadoop.mapred.InputSplit} from the {@link
 * org.apache.tez.mapreduce.input.MRInput} representation of a split.
 *
 * @param splitProto           The {@link org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRSplitProto}
 *                             instance representing the split
 * @param serializationFactory the serialization mechanism used to write out the split
 * @return an instance of the split
 * @throws java.io.IOException
 */
@SuppressWarnings("unchecked")
@InterfaceStability.Evolving
@InterfaceAudience.LimitedPrivate({"hive, pig"})
public static InputSplit createOldFormatSplitFromUserPayload(
    MRRuntimeProtos.MRSplitProto splitProto, SerializationFactory serializationFactory)
    throws IOException {
  // This may not need to use serialization factory, since OldFormat
  // always uses Writable to write splits.
  Preconditions.checkNotNull(splitProto, "splitProto cannot be null");
  String className = splitProto.getSplitClassName();
  Class<InputSplit> clazz;

  try {
    clazz = (Class<InputSplit>) Class.forName(className);
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed to load InputSplit class: [" + className + "]", e);
  }

  Deserializer<InputSplit> deserializer = serializationFactory
      .getDeserializer(clazz);
  deserializer.open(splitProto.getSplitBytes().newInput());
  InputSplit inputSplit = deserializer.deserialize(null);
  deserializer.close();
  return inputSplit;
}
 
Developer: apache; Project: tez; Lines: 36; Source: MRInputHelpers.java

Example 11: getSplitDetails

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset) 
 throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  FSDataInputStream inFile = fs.open(file);
  inFile.seek(offset);
  String className = StringInterner.weakIntern(Text.readString(inFile));
  Class<T> cls;
  try {
    cls = (Class<T>) conf.getClassByName(className);
  } catch (ClassNotFoundException ce) {
    IOException wrap = new IOException("Split class " + className + 
                                        " not found");
    wrap.initCause(ce);
    throw wrap;
  }
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<T> deserializer = 
    (Deserializer<T>) factory.getDeserializer(cls);
  deserializer.open(inFile);
  T split = deserializer.deserialize(null);
  long pos = inFile.getPos();
  getCounters().findCounter(
      TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
  inFile.close();
  return split;
}
 
Developer: naver; Project: hadoop; Lines: 28; Source: MapTask.java
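The counter update records how many bytes of the split file the deserializer actually consumed: inFile.getPos() minus the starting offset, accumulated under TaskCounter.SPLIT_RAW_BYTES.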

Example 12: readFields

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  inputSplitClass = (Class<? extends InputSplit>) readClass(in);
  inputFormatClass = (Class<? extends InputFormat<?, ?>>) readClass(in);
  mapperClass = (Class<? extends Mapper<?, ?, ?, ?>>) readClass(in);
  inputSplit = (InputSplit) ReflectionUtils
     .newInstance(inputSplitClass, conf);
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer deserializer = factory.getDeserializer(inputSplitClass);
  deserializer.open((DataInputStream)in);
  inputSplit = (InputSplit)deserializer.deserialize(inputSplit);
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 13; Source: TaggedInputSplit.java

Example 13: getSplitDetails

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset)
 throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  FSDataInputStream inFile = fs.open(file);
  inFile.seek(offset);
  String className = Text.readString(inFile);
  Class<T> cls;
  try {
    cls = (Class<T>) conf.getClassByName(className);
  } catch (ClassNotFoundException ce) {
    IOException wrap = new IOException("Split class " + className +
                                        " not found");
    wrap.initCause(ce);
    throw wrap;
  }
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<T> deserializer =
    (Deserializer<T>) factory.getDeserializer(cls);
  deserializer.open(inFile);
  T split = deserializer.deserialize(null);
  long pos = inFile.getPos();
  getCounters().findCounter(
       Task.Counter.SPLIT_RAW_BYTES).increment(pos - offset);
  inFile.close();
  return split;
}
 
Developer: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 28; Source: MapTask.java

Example 14: readFields

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException
{
    int keyBytesLen = in.readInt();
    byte[] keyBytes = new byte[keyBytesLen];
    in.readFully(keyBytes, 0, keyBytesLen);

    filename = new Path(in.readUTF());
    offset = in.readLong();
    length = in.readLong();
    blockId = in.readLong();
    numRecords = in.readLong();
    try
    {
        keyClass = (Class<K>) ClassCache.forName(in.readUTF());
        valueClass = (Class<V>) ClassCache.forName(in.readUTF());

        SerializationFactory serializationFactory = new SerializationFactory(conf);
        Deserializer<K> keyDeserializer =
                serializationFactory.getDeserializer(keyClass);

        ByteArrayInputStream bis = new ByteArrayInputStream(keyBytes);
        keyDeserializer.open(bis);

        key = keyDeserializer.deserialize(null);

        ObjectMapper mapper = new ObjectMapper();
        schema = new BlockSchema(mapper.readValue(in.readUTF(), JsonNode.class));
        blockSerializationType = BlockSerializationType.values()[in.readInt()];
    }
    catch (ClassNotFoundException e)
    {
        // Rethrow rather than swallow: a missing class leaves the split unusable.
        throw new IOException("Failed to load key/value class", e);
    }
}
 
Developer: linkedin; Project: Cubert; Lines: 38; Source: RubixInputSplit.java

Example 15: readFields

import org.apache.hadoop.io.serializer.Deserializer; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public void readFields(DataInput is) throws IOException {
    disableCounter = is.readBoolean();
    isMultiInputs = is.readBoolean();
    totalSplits = is.readInt();
    splitIndex = is.readInt();
    inputIndex = is.readInt();
    targetOps = (ArrayList<OperatorKey>) readObject(is);
    int splitLen = is.readInt();
    int distinctSplitClassCount = is.readInt();
    //construct the input split class name list
    String[] distinctSplitClassName = new String[distinctSplitClassCount];
    for (int i = 0; i < distinctSplitClassCount; i++) {
        distinctSplitClassName[i] = is.readUTF();
    }
    try {
        SerializationFactory sf = new SerializationFactory(conf);
        // Correct call sequence for Deserializer: open, then deserialize; do not
        // close it here, since closing would also close the underlying stream.
        wrappedSplits = new InputSplit[splitLen];
        for (int i = 0; i < splitLen; i++)
        {
            //read the className index
            int index = is.readInt();
            //get the split class name
            String splitClassName = distinctSplitClassName[index];
            Class splitClass = conf.getClassByName(splitClassName);
            Deserializer d = sf.getDeserializer(splitClass);
            d.open((InputStream) is);
            wrappedSplits[i] = (InputSplit)ReflectionUtils.newInstance(splitClass, conf);
            wrappedSplits[i] = (InputSplit) d.deserialize(wrappedSplits[i]);
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    }
}
 
Developer: sigmoidanalytics; Project: spork; Lines: 36; Source: PigSplit.java
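The "do not close" comment in this example is deliberate: each Deserializer is opened directly on the caller's DataInput stream, and closing it would also close that stream while readFields may still need to read further fields from it.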


Note: The org.apache.hadoop.io.serializer.Deserializer.deserialize method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.