

Java SerializationFactory.getSerializer Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.serializer.SerializationFactory.getSerializer. If you are wondering how exactly to use SerializationFactory.getSerializer, or what it looks like in practice, the curated method examples below may help. You can also explore further usage examples of org.apache.hadoop.io.serializer.SerializationFactory, the class this method belongs to.


The following presents 15 code examples of the SerializationFactory.getSerializer method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
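Before turning to the examples, note the pattern they all share: construct a SerializationFactory from a Configuration, ask it for a Serializer (and usually the matching Deserializer) for a concrete class, open the serializer on an output stream, and serialize. Below is a minimal, self-contained round-trip sketch of that pattern. It is not taken from any of the projects listed; the choice of Text and the in-memory DataOutputBuffer/DataInputBuffer are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class GetSerializerDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);

    // getSerializer consults the io.serializations configuration to find a
    // Serialization that accepts Text (WritableSerialization by default).
    Serializer<Text> serializer = factory.getSerializer(Text.class);
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(new Text("hello"));
    serializer.close();

    // Round-trip: feed the serialized bytes to the matching Deserializer.
    Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    Text copy = deserializer.deserialize(null);
    deserializer.close();

    System.out.println(copy);  // prints "hello"
  }
}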

Example 1: copy

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 24, Source: ReflectionUtils.java

Example 2: copy

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer ID: spafka, Project: spark_deep, Lines: 23, Source: ReflectionUtils.java

Example 3: KeyValueWriter

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public KeyValueWriter(Configuration conf, OutputStream output,
                      Class<K> kyClass, Class<V> valClass
                     ) throws IOException {
  keyClass = kyClass;
  valueClass = valClass;
  dataBuffer = new DataOutputBuffer();
  SerializationFactory serializationFactory
                                         = new SerializationFactory(conf);
  keySerializer
              = (Serializer<K>)serializationFactory.getSerializer(keyClass);
  keySerializer.open(dataBuffer);
  valueSerializer
            = (Serializer<V>)serializationFactory.getSerializer(valueClass);
  valueSerializer.open(dataBuffer);
  outputStream = new DataOutputStream(output);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestMerge.java

Example 4: serDeser

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
    factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
    factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestWritableJobConf.java

Example 5: copy

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
/**
 * Make a copy of the writable object using serialization to a buffer
 * @param src the object to copy from
 * @param dst the object to copy into, which is destroyed
 * @return dst param (the copy)
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T copy(Configuration conf, 
                              T src, T dst) throws IOException {
  CopyInCopyOutBuffer buffer = cloneBuffers.get();
  buffer.outBuffer.reset();
  SerializationFactory factory = getFactory(conf);
  Class<T> cls = (Class<T>) src.getClass();
  Serializer<T> serializer = factory.getSerializer(cls);
  serializer.open(buffer.outBuffer);
  serializer.serialize(src);
  buffer.moveData();
  Deserializer<T> deserializer = factory.getDeserializer(cls);
  deserializer.open(buffer.inBuffer);
  dst = deserializer.deserialize(dst);
  return dst;
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: ReflectionUtils.java

Example 6: write

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void write(DataOutput os) throws IOException {
    os.writeBoolean(disableCounter);
    os.writeBoolean(isMultiInputs);
    os.writeInt(totalSplits);
    os.writeInt(splitIndex);
    os.writeInt(inputIndex);
    writeObject(targetOps, os);
    os.writeInt(wrappedSplits.length);
    os.writeUTF(wrappedSplits[0].getClass().getName());
    SerializationFactory sf = new SerializationFactory(conf);
    Serializer s = 
        sf.getSerializer(wrappedSplits[0].getClass());
     
    // Check that a Serializer was found before calling open() on it.
    if (s == null) {
        throw new IllegalArgumentException("Could not find Serializer for class "
                + wrappedSplits[0].getClass() + ". InputSplits must implement Writable.");
    }
    s.open((OutputStream) os);
    for (int i = 0; i < wrappedSplits.length; i++)
    {
        // The correct call sequence for Serializer is: open, then serialize; do not close here
        s.serialize(wrappedSplits[i]);
    }
    
}
 
Developer ID: sigmoidanalytics, Project: spork-streaming, Lines: 27, Source: PigSplit.java

Example 7: write

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void write(DataOutput os) throws IOException {
    os.writeBoolean(disableCounter);
    os.writeBoolean(isMultiInputs);
    os.writeInt(totalSplits);
    os.writeInt(splitIndex);
    os.writeInt(inputIndex);
    writeObject(targetOps, os);
    os.writeInt(wrappedSplits.length);
    os.writeUTF(wrappedSplits[0].getClass().getName());
    SerializationFactory sf = new SerializationFactory(conf);
    Serializer s = 
        sf.getSerializer(wrappedSplits[0].getClass());
    s.open((OutputStream) os);
    for (int i = 0; i < wrappedSplits.length; i++)
    {
        // The correct call sequence for Serializer is: open, then serialize; do not close here
        s.serialize(wrappedSplits[i]);
    }
    
}
 
Developer ID: kaituo, Project: sedge, Lines: 22, Source: PigSplit.java

Example 8: createSplitProto

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@InterfaceStability.Evolving
public static <T extends org.apache.hadoop.mapreduce.InputSplit> MRRuntimeProtos.MRSplitProto createSplitProto(
    T newSplit, SerializationFactory serializationFactory)
    throws IOException, InterruptedException {
  MRRuntimeProtos.MRSplitProto.Builder builder = MRRuntimeProtos.MRSplitProto
      .newBuilder();

  builder.setSplitClassName(newSplit.getClass().getName());

  @SuppressWarnings("unchecked")
  Serializer<T> serializer = serializationFactory
      .getSerializer((Class<T>) newSplit.getClass());
  ByteString.Output out = ByteString
      .newOutput(SPLIT_SERIALIZED_LENGTH_ESTIMATE);
  serializer.open(out);
  serializer.serialize(newSplit);
  // TODO MR Compat: Check against max block locations per split.
  ByteString splitBs = out.toByteString();
  builder.setSplitBytes(splitBs);

  return builder.build();
}
 
Developer ID: apache, Project: tez, Lines: 23, Source: MRInputHelpers.java

Example 9: DefaultStringifier

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public DefaultStringifier(Configuration conf, Class<T> c) {

    SerializationFactory factory = new SerializationFactory(conf);
    this.serializer = factory.getSerializer(c);
    this.deserializer = factory.getDeserializer(c);
    this.inBuf = new DataInputBuffer();
    this.outBuf = new DataOutputBuffer();
    try {
      serializer.open(outBuf);
      deserializer.open(inBuf);
    } catch (IOException ex) {
      throw new RuntimeException(ex);
    }
  }
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 15, Source: DefaultStringifier.java

Example 10: init

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
void init(Configuration conf, FSDataOutputStream out, boolean ownStream)
    throws IOException {
  this.conf = conf;
  this.out = out;
  this.ownOutputStream = ownStream;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keySerializer = serializationFactory.getSerializer(WALEntry.class);
  if (this.keySerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Key class: '"
        + WALEntry.class.getCanonicalName() + "'. "
        + "Please ensure that the configuration '" +
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
        + "properly configured, if you're using"
        + "custom serialization.");
  }
  this.keySerializer.open(buffer);
  this.valSerializer = serializationFactory.getSerializer(WALEntry.class);
  if (this.valSerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Value class: '"
        + WALEntry.class.getCanonicalName() + "'. "
        + "Please ensure that the configuration '" +
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
        + "properly configured, if you're using"
        + "custom serialization.");
  }
  this.valSerializer.open(buffer);
  if (appendMode) {
    sync();
  } else {
    writeFileHeader();
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 35, Source: WALFile.java

Example 11: Writer

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public Writer(Configuration conf, FSDataOutputStream out, 
    Class<K> keyClass, Class<V> valueClass,
    CompressionCodec codec, Counters.Counter writesCounter,
    boolean ownOutputStream)
    throws IOException {
  this.writtenRecordsCounter = writesCounter;
  this.checksumOut = new IFileOutputStream(out);
  this.rawOut = out;
  this.start = this.rawOut.getPos();
  if (codec != null) {
    this.compressor = CodecPool.getCompressor(codec);
    if (this.compressor != null) {
      this.compressor.reset();
      this.compressedOut = codec.createOutputStream(checksumOut, compressor);
      this.out = new FSDataOutputStream(this.compressedOut,  null);
      this.compressOutput = true;
    } else {
      LOG.warn("Could not obtain compressor from CodecPool");
      this.out = new FSDataOutputStream(checksumOut,null);
    }
  } else {
    this.out = new FSDataOutputStream(checksumOut,null);
  }
  
  this.keyClass = keyClass;
  this.valueClass = valueClass;

  if (keyClass != null) {
    SerializationFactory serializationFactory = 
      new SerializationFactory(conf);
    this.keySerializer = serializationFactory.getSerializer(keyClass);
    this.keySerializer.open(buffer);
    this.valueSerializer = serializationFactory.getSerializer(valueClass);
    this.valueSerializer.open(buffer);
  }
  this.ownOutputStream = ownOutputStream;
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: IFile.java

Example 12: write

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
  Text.writeString(out, inputSplitClass.getName());
  Text.writeString(out, inputFormatClass.getName());
  Text.writeString(out, mapperClass.getName());
  SerializationFactory factory = new SerializationFactory(conf);
  Serializer serializer = 
        factory.getSerializer(inputSplitClass);
  serializer.open((DataOutputStream)out);
  serializer.serialize(inputSplit);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TaggedInputSplit.java

Example 13: writeNewSplits

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: JobSplitWriter.java

Example 14: writeNewSplits

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 37, Source: JobSplitWriter.java

Example 15: Writer

import org.apache.hadoop.io.serializer.SerializationFactory; // import the package/class this method depends on
public Writer(Configuration conf, FSDataOutputStream out, 
    Class<K> keyClass, Class<V> valueClass,
    CompressionCodec codec, Counters.Counter writesCounter)
    throws IOException {
  this.writtenRecordsCounter = writesCounter;
  this.checksumOut = new IFileOutputStream(out);
  this.rawOut = out;
  this.start = this.rawOut.getPos();
  if (codec != null) {
    this.compressor = CodecPool.getCompressor(codec);
    if (this.compressor != null) {
      this.compressor.reset();
      this.compressedOut = codec.createOutputStream(checksumOut, compressor);
      this.out = new FSDataOutputStream(this.compressedOut,  null);
      this.compressOutput = true;
    } else {
      LOG.warn("Could not obtain compressor from CodecPool");
      this.out = new FSDataOutputStream(checksumOut,null);
    }
  } else {
    this.out = new FSDataOutputStream(checksumOut,null);
  }
  
  this.keyClass = keyClass;
  this.valueClass = valueClass;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keySerializer = serializationFactory.getSerializer(keyClass);
  this.keySerializer.open(buffer);
  this.valueSerializer = serializationFactory.getSerializer(valueClass);
  this.valueSerializer.open(buffer);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 32, Source: IFile.java


Note: The org.apache.hadoop.io.serializer.SerializationFactory.getSerializer method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.