当前位置: 首页>>代码示例>>Java>>正文


Java DefaultCodec类代码示例

本文整理汇总了Java中org.apache.hadoop.io.compress.DefaultCodec的典型用法代码示例。如果您正苦于以下问题:Java DefaultCodec类的具体用法?Java DefaultCodec怎么用?Java DefaultCodec使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


DefaultCodec类属于org.apache.hadoop.io.compress包,在下文中一共展示了DefaultCodec类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testIFileWriterWithCodec

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Test
/**
 * Build an IFile.Writer backed by GzipCodec; the test JVM has no native
 * libraries, so this codec exercises the pure-Java (no compressor) path.
 */
public void testIFileWriterWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem raw = ((LocalFileSystem) FileSystem.getLocal(conf)).getRaw();
  Path dataFile = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec gzip = new GzipCodec();
  gzip.setConf(conf);
  IFile.Writer<Text, Text> ifileWriter =
    new IFile.Writer<Text, Text>(conf, raw.create(dataFile), Text.class, Text.class,
                                 gzip, null);
  ifileWriter.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestIFile.java

示例2: testIFileReaderWithCodec

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Test
/**
 * Same as above but also open an IFile.Reader over the written file and
 * verify that a read past the (empty) payload surfaces exactly the
 * trailing checksum bytes.
 */
public void testIFileReaderWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  FSDataOutputStream out = rfs.create(path);
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class,
                                   codec, null);
  writer.close();
  FSDataInputStream in = rfs.open(path);
  IFile.Reader<Text, Text> reader =
    new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(),
        codec, null);
  reader.close();

  // The file holds no records, so a checksummed read should return only
  // the checksum itself.
  byte[] buf = new byte[100];
  int bytesRead = reader.checksumIn.readWithChecksum(buf, 0, buf.length);
  // JUnit convention: expected value first, actual second — otherwise a
  // failure message reports the two values the wrong way around.
  assertEquals(reader.checksumIn.getChecksum().length, bytesRead);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:TestIFile.java

示例3: main

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  int count = 1024;
  int size = 10240;
  for (String arg: args) {
    if (arg.startsWith(COUNT)) {
      count = Integer.parseInt(arg.replace(COUNT, ""));
    } else if (arg.startsWith(SIZE)) {
      size = Integer.parseInt(arg.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger)IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  timerTests(util, count, size,  new KeyValueCodec(), null);
  timerTests(util, count, size,  new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size,  new KeyValueCodec(), new GzipCodec());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:24,代码来源:TestIPCUtil.java

示例4: getSequenceWriter

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * Opens the SequenceFile.Writer for this task's temporary output file,
 * honoring the job's output-compression settings. When compression is
 * disabled the writer is created with CompressionType.NONE and no codec;
 * otherwise the configured codec (DefaultCodec if unset) is instantiated
 * reflectively.
 */
protected SequenceFile.Writer getSequenceWriter(TaskAttemptContext context,
    Class<?> keyClass, Class<?> valueClass) 
    throws IOException {
  Configuration conf = context.getConfiguration();
  CompressionType compressionType = CompressionType.NONE;
  CompressionCodec codec = null;
  if (getCompressOutput(context)) {
    compressionType = getOutputCompressionType(context);
    Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
    codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
  }
  // Write to the task's temporary work file; the framework promotes it on commit.
  Path file = getDefaultWorkFile(context, "");
  FileSystem fs = file.getFileSystem(conf);
  return SequenceFile.createWriter(
      fs, conf, file, keyClass, valueClass, compressionType, codec, context);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:27,代码来源:SequenceFileOutputFormat.java

示例5: getDataOutputStream

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * Opens a DataOutputStream over this task's work file, wrapping it in the
 * configured compression codec's output stream when output compression is
 * enabled (the codec's default extension is appended to the file name).
 */
protected DataOutputStream getDataOutputStream(final TaskAttemptContext job) throws IOException, InterruptedException {
    final Configuration conf = job.getConfiguration();
    final boolean compress = getCompressOutput(job);
    CompressionCodec compressor = null;
    String suffix = "";
    if (compress) {
        final Class<? extends CompressionCodec> codecType = getOutputCompressorClass(job, DefaultCodec.class);
        compressor = ReflectionUtils.newInstance(codecType, conf);
        suffix = compressor.getDefaultExtension();
    }
    final Path target = super.getDefaultWorkFile(job, suffix);
    final FileSystem fs = target.getFileSystem(conf);
    if (compress) {
        return new DataOutputStream(compressor.createOutputStream(fs.create(target, false)));
    }
    return new DataOutputStream(fs.create(target, false));
}
 
开发者ID:PKUSilvester,项目名称:LiteGraph,代码行数:19,代码来源:CommonFileOutputFormat.java

示例6: MapOutputCopier

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * Copier thread for fetching one reduce task's map outputs. Configures
 * shuffle connect/read timeouts from the job and, when map outputs are
 * compressed, instantiates the configured codec (DefaultCodec if unset)
 * and borrows a decompressor from the pool.
 */
public MapOutputCopier(JobConf job, Reporter reporter, SecretKey jobTokenSecret) {
  setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id);
  LOG.debug(getName() + " created");
  this.reporter = reporter;

  this.jobTokenSecret = jobTokenSecret;
 
  shuffleConnectionTimeout =
    job.getInt("mapreduce.reduce.shuffle.connect.timeout", STALLED_COPY_TIMEOUT);
  shuffleReadTimeout =
    job.getInt("mapreduce.reduce.shuffle.read.timeout", DEFAULT_READ_TIMEOUT);

  if (job.getCompressMapOutput()) {
    Class<? extends CompressionCodec> compressorType =
      job.getMapOutputCompressorClass(DefaultCodec.class);
    codec = ReflectionUtils.newInstance(compressorType, job);
    decompressor = CodecPool.getDecompressor(codec);
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:20,代码来源:ReduceTask.java

示例7: getSequenceWriter

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * Creates the SequenceFile.Writer for this task's temporary output,
 * applying the job's configured compression type and codec. Without
 * output compression the writer uses CompressionType.NONE and no codec;
 * the codec class falls back to DefaultCodec when none is configured.
 */
protected SequenceFile.Writer getSequenceWriter(TaskAttemptContext context,
    Class<?> keyClass, Class<?> valueClass)
    throws IOException {
  Configuration conf = context.getConfiguration();

  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(context)) {
    compressionType = getOutputCompressionType(context);
    // Instantiate the configured codec reflectively (DefaultCodec fallback).
    Class<?> codecClass =
        getOutputCompressorClass(context, DefaultCodec.class);
    codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
  }
  // Temporary work file; promoted to the real output path on task commit.
  Path file = getDefaultWorkFile(context, "");
  return SequenceFile.createWriter(file.getFileSystem(conf), conf, file,
      keyClass, valueClass, compressionType, codec, context);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:27,代码来源:SequenceFileAsBinaryOutputFormat.java

示例8: testIFileWriterWithCodec

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Test
/**
 * Create an IFile.Writer using GzipCodec since this codec does not
 * have a compressor when run via the tests (ie no native libraries).
 */
public void testIFileWriterWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  IFile.Writer<Text, Text> writer =
    new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
                                 codec, null);
  writer.close();
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:18,代码来源:TestIFile.java

示例9: testIFileReaderWithCodec

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Test
/** Same as above but create a reader. */
public void testIFileReaderWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  IFile.Writer<Text, Text> writer =
    new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
                                 codec, null);
  writer.close();
  IFile.Reader<Text, Text> reader =
    new IFile.Reader<Text, Text>(conf, rfs, path, codec, null);
  reader.close();
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:18,代码来源:TestIFile.java

示例10: MapOutputCopier

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * Daemon copier thread for fetching map outputs. Reads the shuffle
 * connect/read timeouts from the job configuration and, when map outputs
 * are compressed, instantiates the configured codec (DefaultCodec if
 * unset) and borrows a decompressor from the pool.
 */
public MapOutputCopier(JobConf job, Reporter reporter) {
  setName("MapOutputCopier " + reduceTask.getTaskID() + "." + id);
  LOG.debug(getName() + " created");
  this.reporter = reporter;

  shuffleConnectionTimeout =
    job.getInt("mapreduce.reduce.shuffle.connect.timeout", STALLED_COPY_TIMEOUT);
  shuffleReadTimeout =
    job.getInt("mapreduce.reduce.shuffle.read.timeout", DEFAULT_READ_TIMEOUT);

  if (job.getCompressMapOutput()) {
    Class<? extends CompressionCodec> compressorType =
      job.getMapOutputCompressorClass(DefaultCodec.class);
    codec = ReflectionUtils.newInstance(compressorType, job);
    decompressor = CodecPool.getDecompressor(codec);
  }
  // Daemon thread: must not keep the JVM alive after the task finishes.
  setDaemon(true);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:19,代码来源:ReduceTask.java

示例11: setStoreLocation

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Override
public void setStoreLocation(String location, Job job)
    throws IOException {
  // Key/value classes were captured at construction time.
  job.setOutputKeyClass(keyClass);
  job.setOutputValueClass(valueClass);
  // Only wire up compression when both type and codec were specified.
  if (compressionType != null && compressionCodecClass != null) {
    Class<? extends CompressionCodec> resolvedCodec =
        FileOutputFormat.getOutputCompressorClass(job, DefaultCodec.class);
    SequenceFileOutputFormat.setOutputCompressorClass(job, resolvedCodec);
    SequenceFileOutputFormat.setOutputCompressionType(
        job, SequenceFile.CompressionType.valueOf(compressionType));
  }
  FileOutputFormat.setOutputPath(job, new Path(location));
}
 
开发者ID:Hanmourang,项目名称:hiped2,代码行数:17,代码来源:SequenceFileStoreFunc.java

示例12: put

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * See {@link StateStore#put(String, String, T)}.
 *
 * <p>
 *   This implementation does not support putting the state object into an existing store as
 *   append is to be supported by the Hadoop SequenceFile (HADOOP-7139).
 * </p>
 */
/**
 * See {@link StateStore#put(String, String, T)}.
 *
 * <p>
 *   Does not support appending to an existing store: append support in
 *   Hadoop SequenceFile is still pending (HADOOP-7139), so each put
 *   writes through a freshly created table file.
 * </p>
 */
@Override
public void put(String storeName, String tableName, T state)
    throws IOException {
  Path table = new Path(new Path(this.storeRootDir, storeName), tableName);
  boolean missing = !this.fs.exists(table);
  if (missing && !create(storeName, tableName)) {
    throw new IOException("Failed to create a state file for table " + tableName);
  }

  // Closer guarantees the writer is released and preserves the primary
  // exception if close() also throws.
  Closer closer = Closer.create();
  try {
    SequenceFile.Writer out = closer.register(
        SequenceFile.createWriter(this.fs, this.conf, table, Text.class, this.stateClass,
            SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
    out.append(new Text(Strings.nullToEmpty(state.getId())), state);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
开发者ID:Hanmourang,项目名称:Gobblin,代码行数:29,代码来源:FsStateStore.java

示例13: putAll

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * See {@link StateStore#putAll(String, String, Collection)}.
 *
 * <p>
 *   This implementation does not support putting the state objects into an existing store as
 *   append is to be supported by the Hadoop SequenceFile (HADOOP-7139).
 * </p>
 */
/**
 * See {@link StateStore#putAll(String, String, Collection)}.
 *
 * <p>
 *   Does not support appending to an existing store: append support in
 *   Hadoop SequenceFile is still pending (HADOOP-7139), so each call
 *   writes through a freshly created table file.
 * </p>
 */
@Override
public void putAll(String storeName, String tableName, Collection<T> states)
    throws IOException {
  Path table = new Path(new Path(this.storeRootDir, storeName), tableName);
  boolean missing = !this.fs.exists(table);
  if (missing && !create(storeName, tableName)) {
    throw new IOException("Failed to create a state file for table " + tableName);
  }

  // Closer guarantees the writer is released and preserves the primary
  // exception if close() also throws.
  Closer closer = Closer.create();
  try {
    SequenceFile.Writer out = closer.register(
        SequenceFile.createWriter(this.fs, this.conf, table, Text.class, this.stateClass,
            SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
    for (T state : states) {
      out.append(new Text(Strings.nullToEmpty(state.getId())), state);
    }
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
 
开发者ID:Hanmourang,项目名称:Gobblin,代码行数:31,代码来源:FsStateStore.java

示例14: getCompressionCodecClass

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
/**
 * @return the compression codec in use for this file.
 */
public Class<? extends CompressionCodec> getCompressionCodecClass() {
	if (!isReader)
		return DefaultCodec.class;

	Reader reader = null;
	try {
		reader = createReader();
		if (reader.getCompressionCodec() == null)
			return null;
		return reader.getCompressionCodec().getClass();
	} catch (final Exception e) {
		throw new RuntimeException(e);
	} finally {
		if (reader != null)
			try {
				reader.close();
			} catch (final IOException e1) {
			}
	}
}
 
开发者ID:openimaj,项目名称:openimaj,代码行数:24,代码来源:SequenceFileUtility.java

示例15: testIFileWriterWithCodec

import org.apache.hadoop.io.compress.DefaultCodec; //导入依赖的package包/类
@Test
/**
 * Create an IFile.Writer via GzipCodec; under the tests no native
 * libraries are loaded, so this codec has no compressor and exercises
 * the pure-Java write path.
 */
public void testIFileWriterWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem raw = ((LocalFileSystem) FileSystem.getLocal(conf)).getRaw();
  Path dataFile = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec gzip = new GzipCodec();
  gzip.setConf(conf);
  IFile.Writer<Text, Text> ifileWriter =
    new IFile.Writer<Text, Text>(conf, raw, dataFile, Text.class, Text.class,
                                 gzip, null);
  ifileWriter.close();
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:18,代码来源:TestIFile.java


注:本文中的org.apache.hadoop.io.compress.DefaultCodec类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。