Java Serializer.open Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.serializer.Serializer.open, gathered from open-source projects. If you have been wondering what Serializer.open does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.serializer.Serializer.


The sections below present 15 code examples of Serializer.open, ordered roughly by popularity.
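
Before the individual examples, the following minimal sketch shows the round-trip pattern they all share: open a Serializer on an output stream, serialize, then open a Deserializer over the resulting bytes. It is illustrative only and not taken from any of the projects below; Text is used purely as an example type.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

import java.io.IOException;

public class SerializerOpenDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);

    // Bind the serializer to an output stream, then write the object.
    Serializer<Text> serializer = factory.getSerializer(Text.class);
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(new Text("hello"));
    serializer.close();

    // Bind a deserializer to the serialized bytes and read the object back.
    Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    Text roundTripped = deserializer.deserialize(null);
    deserializer.close();

    System.out.println(roundTripped); // prints "hello"
  }
}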

Example 1: copy

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 24, Source: ReflectionUtils.java
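
For context, a typical call site for this copy helper looks like the following short sketch (the Text values are illustrative):

Configuration conf = new Configuration();
Text src = new Text("payload");
Text dst = new Text();
dst = ReflectionUtils.copy(conf, src, dst); // dst now holds a deep copy of src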

Example 2: copy

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: spafka, Project: spark_deep, Lines: 23, Source: ReflectionUtils.java

Example 3: serDeser

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
    factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
    factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestWritableJobConf.java
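
A hedged sketch of how the test might exercise this helper (the exact keys and assertions in TestWritableJobConf may differ):

JobConf before = new JobConf(CONF);
before.set("some.key", "some.value");
JobConf after = serDeser(before);
// the configured serialization must survive the round trip intact
assertEquals("some.value", after.get("some.key"));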

Example 4: makeCopyForPassByValue

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
private <E> E makeCopyForPassByValue(Serialization<E> serialization,
                                      E obj) throws IOException {
  Serializer<E> ser =
    serialization.getSerializer(GenericsUtil.getClass(obj));
  Deserializer<E> deser =
    serialization.getDeserializer(GenericsUtil.getClass(obj));

  DataOutputBuffer dof = threadLocalDataOutputBuffer.get();

  dof.reset();
  ser.open(dof);
  ser.serialize(obj);
  ser.close();
  obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
                                    getChainJobConf());
  ByteArrayInputStream bais =
    new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
  deser.open(bais);
  deser.deserialize(obj);
  deser.close();
  return obj;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: Chain.java

Example 5: writeFirstKeyValueBytes

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
/**
 * Writes the record that was most recently served (before a call to
 * mark). Since the framework reads one record ahead, that record is
 * recovered by serializing the current key and value.
 * @param out the stream to write the serialized key and value to
 * @throws IOException
 */
private void writeFirstKeyValueBytes(DataOutputStream out) 
throws IOException {
  assert (getCurrentKey() != null && getCurrentValue() != null);
  WritableUtils.writeVInt(out, currentKeyLength);
  WritableUtils.writeVInt(out, currentValueLength);
  Serializer<KEYIN> keySerializer = 
    serializationFactory.getSerializer(keyClass);
  keySerializer.open(out);
  keySerializer.serialize(getCurrentKey());

  Serializer<VALUEIN> valueSerializer = 
    serializationFactory.getSerializer(valueClass);
  valueSerializer.open(out);
  valueSerializer.serialize(getCurrentValue());
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: ReduceContextImpl.java

Example 6: copy

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: ReflectionUtils.java

Example 7: configureSerializer

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings({ "rawtypes", "unchecked" })
void configureSerializer(String confKey, Configuration conf) {
  Class clientInputSerializerClass = conf.getClass(confKey, null);
  if (clientInputSerializerClass != null) {
    LOG.info("Using custom serializer: " + clientInputSerializerClass.getName());
    clientInputSerializer = 
        (Serializer) ReflectionUtils.newInstance(clientInputSerializerClass, conf);

    try {
      clientInputSerializer.open(clientOut_);
    } catch (IOException e) {
      LOG.error("Could not open serializer", e);
      throw new RuntimeException(e);
    }
  } else {
    LOG.info("Not using a custom serializer");
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: PipeMapRed.java
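
The confKey argument is chosen by the caller, so wiring in a custom serializer is a one-line configuration change. In this sketch, both the key name and MyCustomSerializer are placeholders, not names taken from the project:

// hypothetical key and class, for illustration only
conf.setClass("stream.custom.input.serializer", MyCustomSerializer.class, Serializer.class);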

Example 8: write

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
public void write(DataOutput os) throws IOException {
    os.writeBoolean(disableCounter);
    os.writeBoolean(isMultiInputs);
    os.writeInt(totalSplits);
    os.writeInt(splitIndex);
    os.writeInt(inputIndex);
    writeObject(targetOps, os);
    os.writeInt(wrappedSplits.length);
    os.writeUTF(wrappedSplits[0].getClass().getName());
    SerializationFactory sf = new SerializationFactory(conf);
    Serializer s = 
        sf.getSerializer(wrappedSplits[0].getClass());
    s.open((OutputStream) os);
    for (int i = 0; i < wrappedSplits.length; i++)
    {
        // The correct call sequence for Serializer is open, then serialize; we must not close it here, since the underlying stream is still in use
        s.serialize(wrappedSplits[i]);
    }
}
 
Developer: kaituo, Project: sedge, Lines: 22, Source: PigSplit.java
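
The "open, then serialize, but not close" comment above makes more sense next to the read side, where the same stream carries further data after the splits. Below is a sketch of what a matching readFields could look like; it mirrors the fields in Example 8, but it is illustrative and not Pig's actual implementation (readObject is assumed to be the counterpart of the writeObject helper used above).

import org.apache.hadoop.io.serializer.Deserializer; //needed on the read side

@SuppressWarnings("unchecked")
public void readFields(DataInput is) throws IOException {
    disableCounter = is.readBoolean();
    isMultiInputs = is.readBoolean();
    totalSplits = is.readInt();
    splitIndex = is.readInt();
    inputIndex = is.readInt();
    targetOps = readObject(is); // assumed counterpart of writeObject(targetOps, os)
    int numSplits = is.readInt();
    String splitClassName = is.readUTF();
    try {
        Class splitClass = conf.getClassByName(splitClassName);
        SerializationFactory sf = new SerializationFactory(conf);
        Deserializer d = sf.getDeserializer(splitClass);
        // Mirror the write side: open once, deserialize every split, do not close.
        // The cast assumes the DataInput is really a DataInputStream, just as the
        // write side casts its DataOutput to an OutputStream.
        d.open((InputStream) is);
        wrappedSplits = new InputSplit[numSplits];
        for (int i = 0; i < numSplits; i++) {
            wrappedSplits[i] = (InputSplit) ReflectionUtils.newInstance(splitClass, conf);
            d.deserialize(wrappedSplits[i]);
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    }
}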

Example 9: createSplitProto

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@InterfaceStability.Evolving
public static <T extends org.apache.hadoop.mapreduce.InputSplit> MRRuntimeProtos.MRSplitProto createSplitProto(
    T newSplit, SerializationFactory serializationFactory)
    throws IOException, InterruptedException {
  MRRuntimeProtos.MRSplitProto.Builder builder = MRRuntimeProtos.MRSplitProto
      .newBuilder();

  builder.setSplitClassName(newSplit.getClass().getName());

  @SuppressWarnings("unchecked")
  Serializer<T> serializer = serializationFactory
      .getSerializer((Class<T>) newSplit.getClass());
  ByteString.Output out = ByteString
      .newOutput(SPLIT_SERIALIZED_LENGTH_ESTIMATE);
  serializer.open(out);
  serializer.serialize(newSplit);
  // TODO MR Compat: Check against max block locations per split.
  ByteString splitBs = out.toByteString();
  builder.setSplitBytes(splitBs);

  return builder.build();
}
 
Developer: apache, Project: tez, Lines: 23, Source: MRInputHelpers.java

Example 10: write

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
  Text.writeString(out, inputSplitClass.getName());
  Text.writeString(out, inputFormatClass.getName());
  Text.writeString(out, mapperClass.getName());
  SerializationFactory factory = new SerializationFactory(conf);
  Serializer serializer = 
        factory.getSerializer(inputSplitClass);
  serializer.open((DataOutputStream)out);
  serializer.serialize(inputSplit);
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TaggedInputSplit.java

Example 11: writeNewSplits

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: JobSplitWriter.java

Example 12: writeNewSplits

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            split.getLocations(), offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 29, Source: JobSplitWriter.java

Example 13: writeNewSplits

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 37, Source: JobSplitWriter.java

Example 14: writeNewSplits

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    long offset = out.size();
    for(T split: array) {
      int prevCount = out.size();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      int currCount = out.size();
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            split.getLocations(), offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 29, Source: JobSplitWriter.java
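
Taken together, the four writeNewSplits variants above differ in small but telling ways: Example 11 truncates an oversized block-location array with a warning, Example 13 fails hard with an IOException for the same condition, Example 12 performs no limit check at all, and Example 14 measures offsets with out.size() rather than out.getPos(). The Serializer.open/serialize pattern itself is identical in all four.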

Example 15: createInMemStreams

import org.apache.hadoop.io.serializer.Serializer; //import the package/class the method depends on
/**
 * Creates in-memory segments.
 *
 * @return the list of in-memory segments
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public List<TezMerger.Segment> createInMemStreams() throws IOException {
  int numberOfStreams = Math.max(2, rnd.nextInt(10));
  LOG.info("No of streams : " + numberOfStreams);

  SerializationFactory serializationFactory = new SerializationFactory(conf);
  Serializer keySerializer = serializationFactory.getSerializer(keyClass);
  Serializer valueSerializer = serializationFactory.getSerializer(valClass);

  LocalDirAllocator localDirAllocator =
      new LocalDirAllocator(TezRuntimeFrameworkConfigs.LOCAL_DIRS);
  InputContext context = createTezInputContext();
  MergeManager mergeManager = new MergeManager(conf, fs, localDirAllocator,
      context, null, null, null, null, null, 1024 * 1024 * 10, null, false, -1);

  DataOutputBuffer keyBuf = new DataOutputBuffer();
  DataOutputBuffer valBuf = new DataOutputBuffer();
  DataInputBuffer keyIn = new DataInputBuffer();
  DataInputBuffer valIn = new DataInputBuffer();
  keySerializer.open(keyBuf);
  valueSerializer.open(valBuf);

  List<TezMerger.Segment> segments = new LinkedList<TezMerger.Segment>();
  for (int i = 0; i < numberOfStreams; i++) {
    BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
    InMemoryWriter writer =
        new InMemoryWriter(bout);
    Map<Writable, Writable> data = createData();
    //write data
    for (Map.Entry<Writable, Writable> entry : data.entrySet()) {
      keySerializer.serialize(entry.getKey());
      valueSerializer.serialize(entry.getValue());
      keyIn.reset(keyBuf.getData(), 0, keyBuf.getLength());
      valIn.reset(valBuf.getData(), 0, valBuf.getLength());
      writer.append(keyIn, valIn);
      originalData.put(entry.getKey(), entry.getValue());
      keyBuf.reset();
      valBuf.reset();
      keyIn.reset();
      valIn.reset();
    }
    IFile.Reader reader = new InMemoryReader(mergeManager, null, bout.getBuffer(), 0,
        bout.getBuffer().length);
    segments.add(new TezMerger.Segment(reader, null));

    data.clear();
    writer.close();
  }
  return segments;
}
 
Developer: apache, Project: tez, Lines: 57, Source: TestValuesIterator.java


Note: The org.apache.hadoop.io.serializer.Serializer.open examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Please do not republish without permission.