

Java ReflectionUtils.setConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.ReflectionUtils.setConf. If you have been wondering exactly what ReflectionUtils.setConf does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.util.ReflectionUtils.


The following presents 15 code examples of ReflectionUtils.setConf, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
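
Before diving in: all 15 examples follow the same basic pattern. An object (often a compression codec) is constructed, then handed the active Configuration via ReflectionUtils.setConf, which injects the configuration if the target implements Configurable (or the mapred JobConfigurable interface) and does nothing otherwise. Below is a minimal, self-contained sketch of that pattern; the configuration key set here is illustrative only and not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class SetConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 8192);  // illustrative setting only
    CompressionCodec gzip = new GzipCodec();
    // GzipCodec implements Configurable, so setConf injects conf into it;
    // for objects that are not Configurable the call is a no-op.
    ReflectionUtils.setConf(gzip, conf);
  }
}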

Example 1: get

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/** Get a comparator for a {@link WritableComparable} implementation. */
public static WritableComparator get(
    Class<? extends WritableComparable> c, Configuration conf) {
  WritableComparator comparator = comparators.get(c);
  if (comparator == null) {
    // force the static initializers to run
    forceInit(c);
    // look to see if it is defined now
    comparator = comparators.get(c);
    // if not, use the generic one
    if (comparator == null) {
      comparator = new WritableComparator(c, conf, true);
    }
  }
  // Newly passed Configuration objects should be used.
  ReflectionUtils.setConf(comparator, conf);
  return comparator;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: WritableComparator.java
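
A hedged usage sketch for the method above, assuming Text (whose static initializer registers a raw comparator) as the WritableComparable implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

public class ComparatorLookupDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Text's static initializer registers Text.Comparator, so get()
    // returns the cached instance and re-applies conf via setConf.
    WritableComparator cmp = WritableComparator.get(Text.class, conf);
    System.out.println(cmp.compare(new Text("a"), new Text("b")));  // negative
  }
}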

Example 2: testGzipEmpty

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec and an empty input file
 */
@Test (timeout=5000)
public void testGzipEmpty() throws IOException {
  JobConf job = new JobConf(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, job);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "empty.gz"), gzip, "");
  FileInputFormat.setInputPaths(job, workDir);
  TextInputFormat format = new TextInputFormat();
  format.configure(job);
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",
               1, splits.length);
  List<Text> results = readSplit(format, splits[0], job);
  assertEquals("Compressed empty file length == 0", 0, results.size());
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestTextInputFormat.java

Example 3: testGzip

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec for reading
 */
public static void testGzip() throws IOException {
  JobConf job = new JobConf();
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, job);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, 
            "line-1\tthe quick\nline-2\tbrown\nline-3\tfox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
            "line-1\tthis is a test\nline-1\tof gzip\n");
  FileInputFormat.setInputPaths(job, workDir);
  KeyValueTextInputFormat format = new KeyValueTextInputFormat();
  format.configure(job);
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }
  List<Text> results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 6, results.size());
  assertEquals("splits[0][5]", " dog", results.get(5).toString());
  results = readSplit(format, splits[1], job);
  assertEquals("splits[1] length", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", 
               results.get(0).toString());    
  assertEquals("splits[1][1]", "of gzip", 
               results.get(1).toString());    
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestKeyValueTextInputFormat.java

Example 4: testGzip

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec for reading
 */
@Test (timeout=5000)
public void testGzip() throws IOException {
  JobConf job = new JobConf(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, job);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, 
            "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
            "this is a test\nof gzip\n");
  FileInputFormat.setInputPaths(job, workDir);
  TextInputFormat format = new TextInputFormat();
  format.configure(job);
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }
  List<Text> results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 6, results.size());
  assertEquals("splits[0][5]", " dog", results.get(5).toString());
  results = readSplit(format, splits[1], job);
  assertEquals("splits[1] length", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", 
               results.get(0).toString());    
  assertEquals("splits[1][1]", "of gzip", 
               results.get(1).toString());    
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestTextInputFormat.java

Example 5: testGzip

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec for reading
 */
@Test(timeout=10000)
public void testGzip() throws IOException {
  JobConf job = new JobConf(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, job);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
            "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
            "this is a test\nof gzip\n");
  FileInputFormat.setInputPaths(job, workDir);
  CombineTextInputFormat format = new CombineTextInputFormat();
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 1", 1, splits.length);
  List<Text> results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 8, results.size());

  final String[] firstList =
    {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  String first = results.get(0).toString();
  if (first.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (first.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestCombineTextInputFormat.java

Example 6: testGzipWithTwoInputs

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec with two input files.
 */
@Test (timeout=5000)
public void testGzipWithTwoInputs() throws IOException {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  JobConf job = new JobConf(defaultConf);
  format.setRecordLength(job, 5);
  FileInputFormat.setInputPaths(job, workDir);
  ReflectionUtils.setConf(gzip, job);
  format.configure(job);
  // Create files with fixed length records with 5 byte long records.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, 
      "one  two  threefour five six  seveneightnine ten  ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "ten  nine eightsevensix  five four threetwo  one  ");
  InputSplit[] splits = format.getSplits(job, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }
  List<String> results = readSplit(format, splits[0], job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six  ", results.get(5));
  results = readSplit(format, splits[1], job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten  ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestFixedLengthInputFormat.java

Example 7: runPartialRecordTest

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
private void runPartialRecordTest(CompressionCodec codec) throws IOException {
  localFs.delete(workDir, true);
  // Create a file with fixed length records with 5 byte long
  // records with a partial record at the end.
  StringBuilder fileName = new StringBuilder("testFormat.txt");
  if (codec != null) {
    fileName.append(".gz");
  }
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  JobConf job = new JobConf(defaultConf);
  format.setRecordLength(job, 5);
  FileInputFormat.setInputPaths(job, workDir);
  if (codec != null) {
    ReflectionUtils.setConf(codec, job);
  }
  format.configure(job);
  writeFile(localFs, new Path(workDir, fileName.toString()), codec,
          "one  two  threefour five six  seveneightnine ten");
  InputSplit[] splits = format.getSplits(job, 100);
  if (codec != null) {
    assertEquals("compressed splits == 1", 1, splits.length);
  }
  boolean exceptionThrown = false;
  for (InputSplit split : splits) {
    try {
      List<String> results = readSplit(format, split, job);
    } catch(IOException ioe) {
      exceptionThrown = true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for partial record:", exceptionThrown);
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestFixedLengthInputFormat.java

Example 8: testGzip

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec for reading
 */
@Test
public void testGzip() throws IOException, InterruptedException {
  Configuration conf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, conf);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, 
            "line-1\tthe quick\nline-2\tbrown\nline-3\t" +
            "fox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
            "line-1\tthis is a test\nline-1\tof gzip\n");
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, workDir);
  KeyValueTextInputFormat format = new KeyValueTextInputFormat();
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<Text> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 6, results.size());
  assertEquals("splits[0][0]", "the quick", results.get(0).toString());
  assertEquals("splits[0][1]", "brown", results.get(1).toString());
  assertEquals("splits[0][2]", "fox jumped", results.get(2).toString());
  assertEquals("splits[0][3]", "over", results.get(3).toString());
  assertEquals("splits[0][4]", " the lazy", results.get(4).toString());
  assertEquals("splits[0][5]", " dog", results.get(5).toString());
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", 
               results.get(0).toString());    
  assertEquals("splits[1][1]", "of gzip", 
               results.get(1).toString());    
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestMRKeyValueTextInputFormat.java

Example 9: testGzip

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec for reading
 */
@Test(timeout=10000)
public void testGzip() throws IOException, InterruptedException {
  Configuration conf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, conf);
  localFs.delete(workDir, true);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
            "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
            "this is a test\nof gzip\n");
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, workDir);
  CombineTextInputFormat format = new CombineTextInputFormat();
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 1", 1, splits.size());
  List<Text> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 8, results.size());

  final String[] firstList =
    {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  String first = results.get(0).toString();
  if (first.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (first.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestCombineTextInputFormat.java

Example 10: testGzipWithTwoInputs

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the gzip codec with two input files.
 */
@Test (timeout=5000)
public void testGzipWithTwoInputs() throws Exception {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  Job job = Job.getInstance(defaultConf);
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 5);
  ReflectionUtils.setConf(gzip, job.getConfiguration());
  FileInputFormat.setInputPaths(job, workDir);
  // Create files with fixed length records with 5 byte long records.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, 
      "one  two  threefour five six  seveneightnine ten  ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "ten  nine eightsevensix  five four threetwo  one  ");
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<String> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six  ", results.get(5));
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten  ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestFixedLengthInputFormat.java

Example 11: runPartialRecordTest

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
private void runPartialRecordTest(CompressionCodec codec) throws Exception {
  localFs.delete(workDir, true);
  Job job = Job.getInstance(defaultConf);
  // Create a file with fixed length records with 5 byte long
  // records with a partial record at the end.
  StringBuilder fileName = new StringBuilder("testFormat.txt");
  if (codec != null) {
    fileName.append(".gz");
    ReflectionUtils.setConf(codec, job.getConfiguration());
  }
  writeFile(localFs, new Path(workDir, fileName.toString()), codec,
      "one  two  threefour five six  seveneightnine ten");
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 5);
  FileInputFormat.setInputPaths(job, workDir);
  List<InputSplit> splits = format.getSplits(job);
  if (codec != null) {
    assertEquals("compressed splits == 1", 1, splits.size());
  }
  boolean exceptionThrown = false;
  for (InputSplit split : splits) {
    try {
      List<String> results = readSplit(format, split, job);
    } catch(IOException ioe) {
      exceptionThrown = true;
      LOG.info("Exception message:" + ioe.getMessage());
    }
  }
  assertTrue("Exception for partial record:", exceptionThrown);
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestFixedLengthInputFormat.java

Example 12: init

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/** Initialize. */
@SuppressWarnings("unchecked")
void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
          Class keyClass, Class valClass,
          CompressionCodec codec, Metadata metadata) 
  throws IOException {
  this.conf = conf;
  this.out = out;
  this.ownOutputStream = ownStream;
  this.keyClass = keyClass;
  this.valClass = valClass;
  this.codec = codec;
  this.metadata = metadata;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keySerializer = serializationFactory.getSerializer(keyClass);
  if (this.keySerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Key class: '"
            + keyClass.getCanonicalName() + "'. "
            + "Please ensure that the configuration '" +
            CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
            + "properly configured, if you're using"
            + "custom serialization.");
  }
  this.keySerializer.open(buffer);
  this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
  if (this.uncompressedValSerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Value class: '"
            + valClass.getCanonicalName() + "'. "
            + "Please ensure that the configuration '" +
            CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
            + "properly configured, if you're using"
            + "custom serialization.");
  }
  this.uncompressedValSerializer.open(buffer);
  if (this.codec != null) {
    ReflectionUtils.setConf(this.codec, this.conf);
    this.compressor = CodecPool.getCompressor(this.codec);
    this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
    this.deflateOut = 
      new DataOutputStream(new BufferedOutputStream(deflateFilter));
    this.compressedValSerializer = serializationFactory.getSerializer(valClass);
    if (this.compressedValSerializer == null) {
      throw new IOException(
          "Could not find a serializer for the Value class: '"
              + valClass.getCanonicalName() + "'. "
              + "Please ensure that the configuration '" +
              CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
              + "properly configured, if you're using"
              + "custom serialization.");
    }
    this.compressedValSerializer.open(deflateOut);
  }

  if (appendMode) {
    sync();
  } else {
    writeFileHeader();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 62, Source: SequenceFile.java
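
Examples 12 and 15 are internal to SequenceFile.Writer; application code normally reaches init() indirectly through SequenceFile.createWriter. A minimal sketch of that public entry point, assuming the Hadoop 2.x Writer.Option API and a hypothetical output path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;

public class SequenceFileWriteDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "/tmp/demo.seq" is an illustrative path, not taken from the article.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/demo.seq")),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        // Supplying a codec leads init() to call ReflectionUtils.setConf on it.
        SequenceFile.Writer.compression(CompressionType.BLOCK, new GzipCodec()));
    try {
      writer.append(new Text("key-1"), new IntWritable(1));
    } finally {
      writer.close();
    }
  }
}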

Example 13: testBzip2

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Test using the bzip2 codec for reading
 */
@Test
public void testBzip2() throws IOException {
  JobConf jobConf = new JobConf(defaultConf);

  CompressionCodec bzip2 = new BZip2Codec();
  ReflectionUtils.setConf(bzip2, jobConf);
  localFs.delete(workDir, true);

  System.out.println(COLOR_BR_CYAN +
    "testBzip2() using non-native CBZip2InputStream (presumably)" +
    COLOR_NORMAL);

  // copy prebuilt (correct!) version of concat.bz2 to HDFS
  final String fn = "concat" + bzip2.getDefaultExtension();
  Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
  Path fnHDFS  = new Path(workDir, fn);
  localFs.copyFromLocalFile(fnLocal, fnHDFS);

  writeFile(localFs, new Path(workDir, "part2.txt.bz2"), bzip2,
            "this is a test\nof bzip2\n");
  FileInputFormat.setInputPaths(jobConf, workDir);
  TextInputFormat format = new TextInputFormat();  // extends FileInputFormat
  format.configure(jobConf);
  format.setMinSplitSize(256);  // work around 2-byte splits issue
  // [135 splits for a 208-byte file and a 62-byte file(!)]

  InputSplit[] splits = format.getSplits(jobConf, 100);
  assertEquals("compressed splits == 2", 2, splits.length);
  FileSplit tmp = (FileSplit) splits[0];
  if (tmp.getPath().getName().equals("part2.txt.bz2")) {
    splits[0] = splits[1];
    splits[1] = tmp;
  }

  List<Text> results = readSplit(format, splits[0], jobConf);
  assertEquals("splits[0] num lines", 6, results.size());
  assertEquals("splits[0][5]", "member #3",
               results.get(5).toString());

  results = readSplit(format, splits[1], jobConf);
  assertEquals("splits[1] num lines", 2, results.size());
  assertEquals("splits[1][0]", "this is a test",
               results.get(0).toString());
  assertEquals("splits[1][1]", "of bzip2",
               results.get(1).toString());
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestConcatenatedCompressedInput.java

Example 14: testMoreBzip2

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/**
 * Extended bzip2 test, similar to BuiltInGzipDecompressor test above.
 */
@Test
public void testMoreBzip2() throws IOException {
  JobConf jobConf = new JobConf(defaultConf);

  CompressionCodec bzip2 = new BZip2Codec();
  ReflectionUtils.setConf(bzip2, jobConf);
  localFs.delete(workDir, true);

  System.out.println(COLOR_BR_MAGENTA +
    "testMoreBzip2() using non-native CBZip2InputStream (presumably)" +
    COLOR_NORMAL);

  // copy single-member test file to HDFS
  String fn1 = "testConcatThenCompress.txt" + bzip2.getDefaultExtension();
  Path fnLocal1 = new Path(System.getProperty("test.concat.data","/tmp"),fn1);
  Path fnHDFS1  = new Path(workDir, fn1);
  localFs.copyFromLocalFile(fnLocal1, fnHDFS1);

  // copy multiple-member test file to HDFS
  String fn2 = "testCompressThenConcat.txt" + bzip2.getDefaultExtension();
  Path fnLocal2 = new Path(System.getProperty("test.concat.data","/tmp"),fn2);
  Path fnHDFS2  = new Path(workDir, fn2);
  localFs.copyFromLocalFile(fnLocal2, fnHDFS2);

  FileInputFormat.setInputPaths(jobConf, workDir);

  // here's first pair of BlockDecompressorStreams:
  final FileInputStream in1 = new FileInputStream(fnLocal1.toString());
  final FileInputStream in2 = new FileInputStream(fnLocal2.toString());
  assertEquals("concat bytes available", 2567, in1.available());
  assertEquals("concat bytes available", 3056, in2.available());

/*
  // FIXME
  // The while-loop below dies at the beginning of the 2nd concatenated
  // member (after 17 lines successfully read) with:
  //
  //   java.io.IOException: bad block header
  //   at org.apache.hadoop.io.compress.bzip2.CBZip2InputStream.initBlock(
  //   CBZip2InputStream.java:527)
  //
  // It is not critical to concatenated-gzip support, HADOOP-6835, so it's
  // simply commented out for now (and HADOOP-6852 filed).  If and when the
  // latter issue is resolved--perhaps by fixing an error here--this code
  // should be reenabled.  Note that the doMultipleBzip2BufferSizes() test
  // below uses the same testCompressThenConcat.txt.bz2 file but works fine.

  CompressionInputStream cin2 = bzip2.createInputStream(in2);
  LineReader in = new LineReader(cin2);
  Text out = new Text();

  int numBytes, totalBytes=0, lineNum=0;
  while ((numBytes = in.readLine(out)) > 0) {
    ++lineNum;
    totalBytes += numBytes;
  }
  in.close();
  assertEquals("total uncompressed bytes in concatenated test file",
               5346, totalBytes);
  assertEquals("total uncompressed lines in concatenated test file",
               84, lineNum);
 */

  // test CBZip2InputStream with lots of different input-buffer sizes
  doMultipleBzip2BufferSizes(jobConf, false);

  // no native version of bzip2 codec (yet?)
  //doMultipleBzip2BufferSizes(jobConf, true);
}
 
Developer: naver, Project: hadoop, Lines: 73, Source: TestConcatenatedCompressedInput.java

Example 15: init

import org.apache.hadoop.util.ReflectionUtils; // import the package/class this method depends on
/** Initialize. */
@SuppressWarnings("unchecked")
void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
          Class keyClass, Class valClass,
          CompressionCodec codec, Metadata metadata) 
  throws IOException {
  this.conf = conf;
  this.out = out;
  this.ownOutputStream = ownStream;
  this.keyClass = keyClass;
  this.valClass = valClass;
  this.codec = codec;
  this.metadata = metadata;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keySerializer = serializationFactory.getSerializer(keyClass);
  if (this.keySerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Key class: '"
            + keyClass.getCanonicalName() + "'. "
            + "Please ensure that the configuration '" +
            CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
            + "properly configured, if you're using"
            + "custom serialization.");
  }
  this.keySerializer.open(buffer);
  this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
  if (this.uncompressedValSerializer == null) {
    throw new IOException(
        "Could not find a serializer for the Value class: '"
            + valClass.getCanonicalName() + "'. "
            + "Please ensure that the configuration '" +
            CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
            + "properly configured, if you're using"
            + "custom serialization.");
  }
  this.uncompressedValSerializer.open(buffer);
  if (this.codec != null) {
    ReflectionUtils.setConf(this.codec, this.conf);
    this.compressor = CodecPool.getCompressor(this.codec);
    this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
    this.deflateOut = 
      new DataOutputStream(new BufferedOutputStream(deflateFilter));
    this.compressedValSerializer = serializationFactory.getSerializer(valClass);
    if (this.compressedValSerializer == null) {
      throw new IOException(
          "Could not find a serializer for the Value class: '"
              + valClass.getCanonicalName() + "'. "
              + "Please ensure that the configuration '" +
              CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
              + "properly configured, if you're using"
              + "custom serialization.");
    }
    this.compressedValSerializer.open(deflateOut);
  }
  writeFileHeader();
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: SequenceFile.java


Note: The org.apache.hadoop.util.ReflectionUtils.setConf method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using any of the code; please do not reproduce this article without permission.