

Java Compression.Algorithm Code Examples

This article collects typical usage examples of org.apache.hadoop.hbase.io.hfile.Compression.Algorithm in Java, HBase's enum of HFile compression codecs (NONE, GZ, LZO, SNAPPY, LZ4). If you are wondering what Compression.Algorithm is for, how to use it, or where to find real-world examples, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.Compression.


The following 15 code examples of Compression.Algorithm are shown, sorted by popularity by default.
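Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core pattern most of them share: resolve an algorithm by its codec name, then borrow a pooled Compressor and return it. The choice of "gz" is an assumption for illustration, since GZ usually works without native libraries.

import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.io.compress.Compressor;

public class CompressionAlgorithmDemo {
  public static void main(String[] args) {
    // Resolve the enum constant from its lower-case codec name.
    Compression.Algorithm algo = Compression.getCompressionAlgorithmByName("gz");

    // Borrow a codec from the algorithm's pool and always return it;
    // Example 4 and Example 8 below use the same probe pattern.
    Compressor compressor = algo.getCompressor();
    try {
      System.out.println("Codec available: " + algo.getName());
    } finally {
      algo.returnCompressor(compressor);
    }
  }
}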

Example 1: getRecordWriter

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(
        TaskAttemptContext context) throws IOException {
  // Get the path of the temporary output file
  final Path outputPath = FileOutputFormat.getOutputPath(context);
  final Path outputDir = new FileOutputCommitter(outputPath, context).getWorkPath();
  final Configuration conf = context.getConfiguration();
  final FileSystem fs = outputDir.getFileSystem(conf);

  int blockSize = conf.getInt(Constants.HFILE_BLOCKSIZE, 16384);
  // Default to snappy.
  Compression.Algorithm compressionAlgorithm = getAlgorithm(
      conf.get(Constants.HFILE_COMPRESSION));
  final StoreFile.Writer writer =
      new StoreFile.WriterBuilder(conf, new CacheConfig(conf), fs, blockSize)
          .withFilePath(hfilePath(outputPath, context.getTaskAttemptID().getTaskID().getId()))
          .withCompression(compressionAlgorithm)
          .build();
  return new HFileRecordWriter(writer);
}
 
Developer: pinterest-attic | Project: terrapin | Lines: 20 | Source: HFileOutputFormat.java
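As a rough sketch, this output format could be wired into a MapReduce job as follows. The Constants keys are the ones read in the snippet above; Job.getInstance assumes a Hadoop 2.x API, and the output path is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

Configuration conf = new Configuration();
conf.setInt(Constants.HFILE_BLOCKSIZE, 65536); // override the 16 KB default
conf.set(Constants.HFILE_COMPRESSION, "GZ");   // one of SNAPPY, GZ, LZO, LZ4, NONE
Job job = Job.getInstance(conf, "hfile-generation");
job.setOutputFormatClass(HFileOutputFormat.class);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles"));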

Example 2: getMockColumnFamilies

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm> getMockColumnFamilies(int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("[email protected]#[email protected]#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 22 | Source: TestHFileOutputFormat.java

Example 3: colDescFromThrift

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 30 | Source: ThriftUtilities.java

Example 4: testCompression

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase();

  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 21 | Source: CompressionTest.java
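A minimal usage sketch for the probe above; the fallback policy in the message is illustrative, not part of the original class.

// Check codec availability before creating a table with that codec.
if (!CompressionTest.testCompression("snappy")) {
  System.err.println("Snappy native libraries unavailable; falling back to GZ.");
}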

Example 5: generateColumnDescriptors

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (StoreFile.BloomType bloomType: StoreFile.BloomType.values()) {
        String name = String.format("%[email protected]#&-%[email protected]#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 25 | Source: HBaseTestingUtility.java

Example 6: testCreateFamilyCompressionMap

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Test for
 * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
 * that the compression map is correctly deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);

    // verify that we have a value for every column family and that it
    // matches the mock value used
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
                   .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 28 | Source: TestHFileOutputFormat.java

Example 7: colDescFromThrift

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName,
      in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
      in.timeToLive, bt.toString());
  return col;
}
 
Developer: lifeng5042 | Project: RStore | Lines: 26 | Source: ThriftUtilities.java

Example 8: testCompression

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return; // already passed the test, don't do it again.
    } else {
      // failed.
      throw new IOException("Compression algorithm '" + algo.getName() + "'" +
      " previously failed test.");
    }
  }

  Configuration conf = HBaseConfiguration.create();
  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new IOException(t);
  }
}
 
Developer: wanhao | Project: IRIndex | Lines: 23 | Source: CompressionTest.java

Example 9: Writer

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @throws IOException problem writing to FS
 */
public Writer(FileSystem fs, Path path, int blocksize,
    Compression.Algorithm compress, final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys)
    throws IOException {
  writer = HFile.getWriterFactory(conf, cacheConf).createWriter(
      fs, path, blocksize,
      compress, comparator.getRawComparator());

  this.kvComparator = comparator;

  bloomFilterWriter = BloomFilterFactory.createBloomAtWrite(conf, cacheConf,
      bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  if (bloomFilterWriter != null) {
    this.bloomType = bloomType;
    LOG.info("Bloom filter type for " + path + ": " + this.bloomType +
        ", "+ bloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }
}
 
Developer: lifeng5042 | Project: RStore | Lines: 36 | Source: StoreFile.java

Example 10: getAlgorithm

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Parses a compression string into its Compression.Algorithm, defaulting to SNAPPY.
 *
 * @param compressionString One of SNAPPY, GZ, LZO, LZ4 or NONE.
 * @return The corresponding Compression.Algorithm enum type.
 */
public static Compression.Algorithm getAlgorithm(String compressionString) {
    Compression.Algorithm compressionAlgo = Compression.Algorithm.SNAPPY;
    if (compressionString == null) {
        return compressionAlgo;
    }
    try {
        compressionAlgo = Compression.Algorithm.valueOf(compressionString);
    } catch (Throwable t) {
        // Use the default.
        return compressionAlgo;
    }
    return compressionAlgo;
}
 
Developer: pinterest-attic | Project: terrapin | Lines: 20 | Source: HFileOutputFormat.java
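Because Compression.Algorithm.valueOf is case-sensitive and only matches the upper-case enum constants, unknown and lower-case names silently fall back to SNAPPY. A few illustrative calls (a sketch, assuming the method is accessible on HFileOutputFormat):

Compression.Algorithm a1 = HFileOutputFormat.getAlgorithm(null);     // SNAPPY (default)
Compression.Algorithm a2 = HFileOutputFormat.getAlgorithm("snappy"); // SNAPPY (lower case fails valueOf)
Compression.Algorithm a3 = HFileOutputFormat.getAlgorithm("GZ");     // GZ
Compression.Algorithm a4 = HFileOutputFormat.getAlgorithm("bogus");  // SNAPPY (unknown name)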

Example 11: TestMultiColumnScanner

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
public TestMultiColumnScanner(Compression.Algorithm comprAlgo,
    StoreFile.BloomType bloomType, boolean useDataBlockEncoding) {
  this.comprAlgo = comprAlgo;
  this.bloomType = bloomType;
  this.dataBlockEncoding = useDataBlockEncoding ? DataBlockEncoding.PREFIX :
      DataBlockEncoding.NONE;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 8 | Source: TestMultiColumnScanner.java

Example 12: Writer

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Constructor that takes a Path.
 * @param fs file system to write to
 * @param path file name to create
 * @param replication HDFS replication factor
 * @param blocksize HDFS block size
 * @param compress block compression algorithm
 * @param comparator index key comparator
 * @throws IOException problem writing to FS
 */
public Writer(FileSystem fs, Path path, short replication, int blocksize,
  Compression.Algorithm compress,
  final IndexKeyComparator comparator)
throws IOException {
  this(fs.create(path,replication), blocksize, compress, comparator);
  this.closeOutputStream = true;
  this.name = path.toString();
  this.path = path;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 19 | Source: IndexFile.java

Example 13: setupMockColumnFamilies

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
private void setupMockColumnFamilies(HTable table,
  Map<String, Compression.Algorithm> familyToCompression) throws IOException
{
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setCompressionType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 14 | Source: TestHFileOutputFormat.java

Example 14: getSupportedCompressionAlgorithms

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Compression.Algorithm[0]);
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 19 | Source: HBaseTestingUtility.java
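A short usage sketch: enumerate what the local classpath actually supports. NONE and GZ are typically always available, while SNAPPY, LZO and LZ4 depend on native libraries being installed.

for (Compression.Algorithm algo : HBaseTestingUtility.getSupportedCompressionAlgorithms()) {
  System.out.println("supported: " + algo.getName());
}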

Example 15: getCompactionCompression

import org.apache.hadoop.hbase.io.hfile.Compression; // required import for this example
/** @return compression type being used for the column family for major
    compactions */
public Compression.Algorithm getCompactionCompression() {
  String n = getValue(COMPRESSION_COMPACT);
  if (n == null) {
    return getCompression();
  }
  return Compression.Algorithm.valueOf(n.toUpperCase());
}
 
Developer: lifeng5042 | Project: RStore | Lines: 10 | Source: HColumnDescriptor.java
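To round out the example, a sketch of configuring a distinct compaction codec, assuming the standard 0.94-era HColumnDescriptor setters (setCompactionCompressionType writes the COMPRESSION_COMPACT value that getCompactionCompression reads back):

// Store flushes as SNAPPY, but recompress to GZ during major compactions.
HColumnDescriptor family = new HColumnDescriptor("cf");
family.setCompressionType(Compression.Algorithm.SNAPPY);
family.setCompactionCompressionType(Compression.Algorithm.GZ);
assert family.getCompactionCompression() == Compression.Algorithm.GZ;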


Note: The org.apache.hadoop.hbase.io.hfile.Compression.Algorithm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; refer to the corresponding project's License before distributing or using them. Do not repost without permission.