

Java Serializer.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.serializer.Serializer.close. If you are wondering what Serializer.close does, how to use it, or where to find examples of it in practice, the hand-picked code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.io.serializer.Serializer.


The following presents 9 code examples of the Serializer.close method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
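Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical Serializer lifecycle: open a stream, serialize one or more objects, then call close() to flush and release the underlying stream. It assumes the default Hadoop configuration, in which WritableSerialization handles Writable types such as Text:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class SerializerCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);
    Serializer<Text> serializer = factory.getSerializer(Text.class);

    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    try {
      serializer.serialize(new Text("hello"));
    } finally {
      serializer.close(); // flushes and closes the stream passed to open()
    }
    System.out.println("serialized " + out.getLength() + " bytes");
  }
}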

Example 1: serDeser

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
    factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
    factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestWritableJobConf.java

Example 2: makeCopyForPassByValue

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
private <E> E makeCopyForPassByValue(Serialization<E> serialization,
                                      E obj) throws IOException {
  Serializer<E> ser =
    serialization.getSerializer(GenericsUtil.getClass(obj));
  Deserializer<E> deser =
    serialization.getDeserializer(GenericsUtil.getClass(obj));

  DataOutputBuffer dof = threadLocalDataOutputBuffer.get();

  dof.reset();
  ser.open(dof);
  ser.serialize(obj);
  ser.close();
  obj = ReflectionUtils.newInstance(GenericsUtil.getClass(obj),
                                    getChainJobConf());
  ByteArrayInputStream bais =
    new ByteArrayInputStream(dof.getData(), 0, dof.getLength());
  deser.open(bais);
  deser.deserialize(obj);
  deser.close();
  return obj;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: Chain.java

Example 3: cloneObj

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
private <T> T cloneObj(T t) throws IOException
{
  // Deep-copy t by serializing it into a pipe and deserializing it back out.
  // Note: a PipedOutputStream/PipedInputStream pair used on a single thread can
  // deadlock once the pipe's internal buffer (1024 bytes by default) fills up.
  PipedInputStream pis = new PipedInputStream();
  PipedOutputStream pos = new PipedOutputStream(pis);
  @SuppressWarnings("unchecked")
  Class<T> keyClass = (Class<T>)t.getClass();
  Serializer<T> keySerializer = serializationFactory.getSerializer(keyClass);
  keySerializer.open(pos);
  keySerializer.serialize(t);
  Deserializer<T> keyDeserializer = serializationFactory.getDeserializer(keyClass);
  keyDeserializer.open(pis);
  T clonedArg0 = keyDeserializer.deserialize(null);
  pos.close();
  pis.close();
  keySerializer.close();
  keyDeserializer.close();
  return clonedArg0;
}
 
Developer: apache, Project: apex-malhar, Lines: 21, Source: OutputCollectorImpl.java
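A side note on Example 3: because serialization and deserialization happen on the same thread, the piped-stream approach can deadlock if the serialized form outgrows the pipe's internal buffer. Below is a deadlock-free sketch of the same deep copy (a hypothetical variant, assuming the same serializationFactory field as above) that uses Hadoop's in-memory buffers instead:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

// Hypothetical cloneObj variant: serialize into an in-memory buffer and
// deserialize from it, so no pipe (and no second thread) is required.
private <T> T cloneObjBuffered(T t) throws IOException
{
  @SuppressWarnings("unchecked")
  Class<T> keyClass = (Class<T>)t.getClass();
  DataOutputBuffer out = new DataOutputBuffer();
  Serializer<T> serializer = serializationFactory.getSerializer(keyClass);
  serializer.open(out);
  serializer.serialize(t);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  Deserializer<T> deserializer = serializationFactory.getDeserializer(keyClass);
  deserializer.open(in);
  T copy = deserializer.deserialize(null);
  deserializer.close();
  return copy;
}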

Example 4: testSerializer

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
@Test
public void testSerializer() throws Exception {
   WebPage originalWebPage = new WebPage(new URL("http://www.jboss.org"), "opensource", 10L);

   ByteArrayOutputStream baos = new ByteArrayOutputStream();

   JBossMarshallerSerialization<WebPage> marshallerSerialization = new JBossMarshallerSerialization<>();
   Serializer<WebPage> serializer = marshallerSerialization.getSerializer(WebPage.class);
   serializer.open(baos);
   serializer.serialize(originalWebPage);
   serializer.close();

   Deserializer<WebPage> deserializer = marshallerSerialization.getDeserializer(WebPage.class);
   deserializer.open(new ByteArrayInputStream(baos.toByteArray()));
   WebPage deserializedWebPage = deserializer.deserialize(null);
   deserializer.close();

   assertEquals(originalWebPage, deserializedWebPage); // expected value first, per JUnit convention
}
 
Developer: infinispan, Project: infinispan-hadoop, Lines: 20, Source: SerializerTest.java

Example 5: computeNewSplits

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
@SuppressWarnings("unchecked")
private <T extends org.apache.hadoop.mapreduce.InputSplit>
List<RawSplit> computeNewSplits(JobContext job)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf conf = job.getJobConf();
  org.apache.hadoop.mapreduce.InputFormat<?,?> input =
    ReflectionUtils.newInstance(job.getInputFormatClass(), job.getJobConf());

  List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(job);
  T[] array = (T[])
    splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

  validateNumberOfTasks(splits.size(), job.getNumReduceTasks(), conf);
  
  // sort the splits into order based on size, so that the biggest
  // go first
  Arrays.sort(array, new NewSplitComparator());
  List<RawSplit> rawSplits = new ArrayList<RawSplit>();
  if (array.length != 0) {
    DataOutputBuffer buffer = new DataOutputBuffer();
    SerializationFactory factory = new SerializationFactory(conf);
    Serializer<T> serializer =
      factory.getSerializer((Class<T>) array[0].getClass());
    serializer.open(buffer);
    for(T split: array) {
      RawSplit rawSplit = new RawSplit();
      rawSplit.setClassName(split.getClass().getName());
      buffer.reset();
      serializer.serialize(split);
      rawSplit.setDataLength(split.getLength());
      rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
      rawSplit.setLocations(split.getLocations());
      rawSplits.add(rawSplit);
    }
    serializer.close();
  }
  return rawSplits;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 39, Source: JobClient.java

Example 6: testNodeProcessingSchema

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
public void testNodeProcessingSchema(MapOperator<LongWritable, Text, Text, IntWritable> oper) throws IOException
{

  CollectorTestSink sortSink = new CollectorTestSink();
  oper.output.setSink(sortSink);

  oper.setMapClass(WordCount.Map.class);
  oper.setCombineClass(WordCount.Reduce.class);
  oper.setDirName(testMeta.testDir);
  oper.setConfigFile(null);
  oper.setInputFormatClass(TextInputFormat.class);

  Configuration conf = new Configuration();
  JobConf jobConf = new JobConf(conf);
  FileInputFormat.setInputPaths(jobConf, new Path(testMeta.testDir));
  TextInputFormat inputFormat = new TextInputFormat();
  inputFormat.configure(jobConf);
  InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  Serializer keySerializer = serializationFactory.getSerializer(splits[0].getClass());
  keySerializer.open(oper.getOutstream());
  keySerializer.serialize(splits[0]);
  oper.setInputSplitClass(splits[0].getClass());
  keySerializer.close();
  oper.setup(null);
  oper.beginWindow(0);
  oper.emitTuples();
  oper.emitTuples();
  oper.endWindow();
  oper.beginWindow(1);
  oper.emitTuples();
  oper.endWindow();

  Assert.assertEquals("number emitted tuples", 3, sortSink.collectedTuples.size());
  for (Object o : sortSink.collectedTuples) {
    LOG.debug(o.toString());
  }
  LOG.debug("Done testing round\n");
  oper.teardown();
}
 
Developer: apache, Project: apex-malhar, Lines: 41, Source: MapOperatorTest.java

Example 7: close

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
@Override
public void close() throws IOException {
    for (Serializer serializer : serializerUsed) {
        if (serializer != null) {
            serializer.close();
        }
    }
    dataOut.close();
}
 
Developer: elazarl, Project: multireducers, Lines: 10, Source: MultiSerializer.java

Example 8: computeNewSplits

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
@SuppressWarnings("unchecked")
private <T extends org.apache.hadoop.mapreduce.InputSplit>
List<RawSplit> computeNewSplits(JobContext job)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf conf = job.getJobConf();
  org.apache.hadoop.mapreduce.InputFormat<?,?> input =
    ReflectionUtils.newInstance(job.getInputFormatClass(), job.getJobConf());

  List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(job);
  T[] array = (T[])
    splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

  // sort the splits into order based on size, so that the biggest
  // go first
  Arrays.sort(array, new NewSplitComparator());
  List<RawSplit> rawSplits = new ArrayList<RawSplit>();
  if (array.length != 0) {
    DataOutputBuffer buffer = new DataOutputBuffer();
    SerializationFactory factory = new SerializationFactory(conf);
    Serializer<T> serializer =
      factory.getSerializer((Class<T>) array[0].getClass());
    serializer.open(buffer);
    for(T split: array) {
      RawSplit rawSplit = new RawSplit();
      rawSplit.setClassName(split.getClass().getName());
      buffer.reset();
      serializer.serialize(split);
      rawSplit.setDataLength(split.getLength());
      rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
      rawSplit.setLocations(split.getLocations());
      rawSplits.add(rawSplit);
    }
    serializer.close();
  }
  return rawSplits;
}
 
Developer: iVCE, Project: RDFS, Lines: 37, Source: JobClient.java

Example 9: writeNewSplits

import org.apache.hadoop.io.serializer.Serializer; // import the class this method depends on
@SuppressWarnings("unchecked")
private <T extends org.apache.hadoop.mapreduce.InputSplit> 
int writeNewSplits(JobContext job, Path submitSplitFile
                   ) throws IOException, InterruptedException, 
                            ClassNotFoundException {
  JobConf conf = job.getJobConf();
  org.apache.hadoop.mapreduce.InputFormat<?,?> input =
    ReflectionUtils.newInstance(job.getInputFormatClass(), job.getJobConf());
  
  List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(job);
  T[] array = (T[])
    splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

  // sort the splits into order based on size, so that the biggest
  // go first
  Arrays.sort(array, new NewSplitComparator());
  DataOutputStream out = writeSplitsFileHeader(conf, submitSplitFile, 
                                               array.length);
  try {
    if (array.length != 0) {
      DataOutputBuffer buffer = new DataOutputBuffer();
      RawSplit rawSplit = new RawSplit();
      SerializationFactory factory = new SerializationFactory(conf);
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) array[0].getClass());
      serializer.open(buffer);
      for(T split: array) {
        rawSplit.setClassName(split.getClass().getName());
        buffer.reset();
        serializer.serialize(split);
        rawSplit.setDataLength(split.getLength());
        rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
        rawSplit.setLocations(split.getLocations());
        rawSplit.write(out);
      }
      serializer.close();
    }
  } finally {
    out.close();
  }
  return array.length;
}
 
Developer: thisisvoa, Project: hadoop-0.20, Lines: 43, Source: JobClient.java


Note: The org.apache.hadoop.io.serializer.Serializer.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not repost without permission.