

Java CompressionType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.SequenceFile.CompressionType. If you are struggling with questions such as: What exactly is the CompressionType class used for? How do I use it? Where can I find usage examples? Then the curated class code examples below may help.


The CompressionType class belongs to the org.apache.hadoop.io.SequenceFile package. Fifteen code examples of the CompressionType class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
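
Before the examples, here is a minimal, self-contained sketch of what CompressionType actually controls. It is a simple enum with three values: NONE (records are written uncompressed), RECORD (each value is compressed individually), and BLOCK (batches of records are compressed together, which usually compresses best for many small records). The demo class name, file path, and key/value types below are illustrative assumptions, not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class CompressionTypeDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Pick one of NONE, RECORD, or BLOCK and pass it as a writer option.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/demo.seq")),   // illustrative path
        SequenceFile.Writer.keyClass(IntWritable.class),
        SequenceFile.Writer.valueClass(Text.class),
        SequenceFile.Writer.compression(CompressionType.BLOCK));
    try {
      writer.append(new IntWritable(1), new Text("hello"));
    } finally {
      writer.close();
    }
  }
}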

Example 1: open

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 23, Source: HDFSSequenceFile.java

Example 2: open

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 22, Source: HDFSSequenceFile.java

Example 3: codecTestMapFile

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 19, Source: TestCodec.java

Example 4: testRecursiveSeqFileCreate

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
public void testRecursiveSeqFileCreate() throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  Path name = new Path(new Path(System.getProperty("test.build.data","."),
      "recursiveCreateDir") , "file");
  boolean createParent = false;

  try {
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
        RandomDatum.class, 512, (short) 1, 4096, createParent,
        CompressionType.NONE, null, new Metadata());
    fail("Expected an IOException due to missing parent");
  } catch (IOException ioe) {
    // Expected
  }

  createParent = true;
  SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
      RandomDatum.class, 512, (short) 1, 4096, createParent,
      CompressionType.NONE, null, new Metadata());
  // should succeed, fails if exception thrown
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TestSequenceFile.java

Example 5: writeTest

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@SuppressWarnings("deprecation")
private void writeTest(FileSystem fs, int count, int seed, Path file, 
                              CompressionType compressionType, CompressionCodec codec)
  throws IOException {
  fs.delete(file, true);
  LOG.info("creating " + count + " records with " + compressionType +
           " compression");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              RandomDatum.class, RandomDatum.class, compressionType, codec);
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for (int i = 0; i < count; i++) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();

    writer.append(key, value);
  }
  writer.close();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 21, Source: TestSequenceFile.java

Example 6: writeMetadataTest

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@SuppressWarnings("deprecation")
private void writeMetadataTest(FileSystem fs, int count, int seed, Path file, 
                                      CompressionType compressionType, CompressionCodec codec, SequenceFile.Metadata metadata)
  throws IOException {
  fs.delete(file, true);
  LOG.info("creating " + count + " records with metadata and with " + compressionType +
           " compression");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              RandomDatum.class, RandomDatum.class, compressionType, codec, null, metadata);
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for (int i = 0; i < count; i++) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();

    writer.append(key, value);
  }
  writer.close();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 21, Source: TestSequenceFile.java

Example 7: testRecursiveSeqFileCreate

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testRecursiveSeqFileCreate() throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  Path name = new Path(new Path(System.getProperty("test.build.data","."),
      "recursiveCreateDir") , "file");
  boolean createParent = false;

  try {
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
        RandomDatum.class, 512, (short) 1, 4096, createParent,
        CompressionType.NONE, null, new Metadata());
    fail("Expected an IOException due to missing parent");
  } catch (IOException ioe) {
    // Expected
  }

  createParent = true;
  SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
      RandomDatum.class, 512, (short) 1, 4096, createParent,
      CompressionType.NONE, null, new Metadata());
  // should succeed, fails if exception thrown
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 24, Source: TestSequenceFile.java

Example 8: testEventCountingRoller

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@Test
public void testEventCountingRoller() throws IOException, InterruptedException {
  int maxEvents = 100; // roll to a new file after every 100 events, so 1000 appends yield 10 files
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      0, 0, maxEvents, 0, ctx, "/tmp", "file", "", ".tmp", null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 24, Source: TestBucketWriter.java

Example 9: testSizeRoller

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@Test
public void testSizeRoller() throws IOException, InterruptedException {
  int maxBytes = 300; // "foo" is 3 bytes, so the file rolls after every 100 events (300 bytes)
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      0, maxBytes, 0, 0, ctx, "/tmp", "file", "", ".tmp", null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 24, Source: TestBucketWriter.java

Example 10: Writer

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
/** Create the named map for keys of the named class. 
 * @deprecated Use Writer(Configuration, Path, Option...) instead.
 */
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
              Class<? extends WritableComparable> keyClass, Class valClass,
              CompressionType compress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass),
       valueClass(valClass), compression(compress));
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: MapFile.java
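
The @deprecated note above points at the Option-based constructor. Here is a minimal sketch of the replacement call, assuming Text keys and values (illustrative types, not part of the original snippet):

// Non-deprecated form suggested by the @deprecated note (sketch):
MapFile.Writer writer = new MapFile.Writer(conf, new Path(dirName),
    MapFile.Writer.keyClass(Text.class),             // MapFile-specific option
    SequenceFile.Writer.valueClass(Text.class),      // SequenceFile.Writer options are accepted too
    SequenceFile.Writer.compression(compress));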

Example 11: testInUsePrefix

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@Test
public void testInUsePrefix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change during the test
  final String PREFIX = "BRNO_IS_CITY_IN_CZECH_REPUBLIC";

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  HDFSTextSerializer formatter = new HDFSTextSerializer();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", PREFIX, ".tmp", null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect in use prefix", hdfsWriter.getOpenedFilePath().contains(PREFIX));
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 19, Source: TestBucketWriter.java

Example 12: testCallbackOnClose

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@Test
public void testCallbackOnClose() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change during the test
  final String SUFFIX = "WELCOME_TO_THE_EREBOR";
  final AtomicBoolean callbackCalled = new AtomicBoolean(false);

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", "", SUFFIX, null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0,
      new HDFSEventSink.WriterCallback() {
        @Override
        public void run(String filePath) {
          callbackCalled.set(true);
        }
      }, "blah", 30000, Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);
  bucketWriter.close(true);

  Assert.assertTrue(callbackCalled.get());
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 25, Source: TestBucketWriter.java

Example 13: open

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
@Override
public void open(String filePath, CompressionCodec codeC, CompressionType compType)
    throws IOException {
  super.open(filePath, codeC, compType);
  if (closed) { // test hook: record that the writer was re-opened after an earlier close
    opened = true;
  }
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 9, Source: HDFSTestSeqWriter.java

Example 14: writeMetadataTest

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
private void writeMetadataTest(FileSystem fs, int count, int seed, Path file, 
                                      CompressionType compressionType, CompressionCodec codec, SequenceFile.Metadata metadata)
  throws IOException {
  fs.delete(file, true);
  LOG.info("creating " + count + " records with metadata and with " + compressionType +
           " compression");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              RandomDatum.class, RandomDatum.class, compressionType, codec, null, metadata);
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for (int i = 0; i < count; i++) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();

    writer.append(key, value);
  }
  writer.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestSequenceFile.java

Example 15: createInputFile

import org.apache.hadoop.io.SequenceFile.CompressionType; // import the required package/class
private void createInputFile(String rootName) throws IOException {
  cleanup();  // clean up if previous run failed

  Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
  SequenceFile.Writer writer =
    SequenceFile.createWriter(fs, fsConfig, inputFile, 
                              Text.class, LongWritable.class, CompressionType.NONE);
  
  try {
    nrFiles = 0;
    listSubtree(new Path(rootName), writer);
  } finally {
    writer.close();
  }
  LOG.info("Created map input files.");
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: DistributedFSCheck.java
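
For completeness, here is a sketch of reading such a file back with the Option-based SequenceFile.Reader, mirroring the Text/LongWritable key and value types used above (the variable names are illustrative):

SequenceFile.Reader reader = new SequenceFile.Reader(conf,
    SequenceFile.Reader.file(inputFile));
try {
  Text key = new Text();
  LongWritable value = new LongWritable();
  while (reader.next(key, value)) {
    // process one (key, value) record per iteration
  }
} finally {
  reader.close();
}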


Note: The org.apache.hadoop.io.SequenceFile.CompressionType class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.