

Java Compression Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.compress.Compression. If you are wondering what the Compression class does, how to use it, or what real-world usage looks like, the curated class examples below may help.


The Compression class belongs to the org.apache.hadoop.hbase.io.compress package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.

Example 1: addColumn

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Add a column family to an existing table.
 *
 * @param tableName  the table name
 * @param familyName the column family name
 */
public void addColumn(String tableName, String familyName) {
    // HBaseConfiguration here is the mumu-hbase project's own wrapper class
    // (not org.apache.hadoop.hbase.HBaseConfiguration); it exposes admin() and close().
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Admin admin = hBaseConfiguration.admin();
    TableName tb = TableName.valueOf(tableName);
    try {
        if (admin.tableExists(tb)) {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);

            columnDescriptor.setMaxVersions(1); // keep at most one version per cell
            columnDescriptor.setCompressionType(Compression.Algorithm.GZ); // compression for flushes and writes
            columnDescriptor.setCompactionCompressionType(Compression.Algorithm.GZ); // compression for compactions

            admin.addColumn(tb, columnDescriptor);
        } else {
            log.info("表名【" + tableName + "】不存在");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        hBaseConfiguration.close();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines of code: 29, Source file: HBaseTableOperation.java
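
The example above relies on the mumu-hbase project's own HBaseConfiguration wrapper. For comparison, here is a minimal sketch of the same operation against the plain HBase 1.x client API; "demo_table" and "demo_cf" are hypothetical names, and a cluster reachable via hbase-site.xml on the classpath is assumed:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.io.compress.Compression;

public class AddColumnSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("demo_table");           // hypothetical table
            if (admin.tableExists(table)) {
                HColumnDescriptor cf = new HColumnDescriptor("demo_cf"); // hypothetical family
                cf.setMaxVersions(1);
                cf.setCompressionType(Compression.Algorithm.GZ);
                cf.setCompactionCompressionType(Compression.Algorithm.GZ);
                admin.addColumn(table, cf); // HBase 1.x API; 2.x uses addColumnFamily
            }
        }
    }
}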

Example 2: colDescFromThrift

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source file: ThriftUtilities.java
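
A short usage sketch for this converter, assuming the Thrift-generated ColumnDescriptor from the hbase-thrift module (org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor, which exposes public fields); all field values here are illustrative:

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.thrift.ThriftUtilities;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class ThriftColDescSketch {
    public static void main(String[] args) throws Exception {
        ColumnDescriptor in = new ColumnDescriptor();
        in.name = ByteBuffer.wrap(Bytes.toBytes("cf"));
        in.compression = "GZ";        // lower-cased by colDescFromThrift before lookup
        in.maxVersions = 3;
        in.bloomFilterType = "ROW";   // must name a BloomType constant
        in.inMemory = false;
        in.blockCacheEnabled = true;
        in.timeToLive = -1;           // non-positive maps to Integer.MAX_VALUE ("forever")
        HColumnDescriptor out = ThriftUtilities.colDescFromThrift(in);
        System.out.println(out);
    }
}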

Example 3: testCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase();

  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source file: CompressionTest.java
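
Since testCompression(String) is public and static, callers can probe codec availability before creating tables that depend on a codec. A minimal sketch (the codec list is illustrative):

import org.apache.hadoop.hbase.util.CompressionTest;

public class CodecAvailabilityCheck {
    public static void main(String[] args) {
        // testCompression lower-cases its argument, so case does not matter here.
        for (String codec : new String[] {"none", "gz", "snappy", "lzo"}) {
            System.out.println(codec + " -> " + CompressionTest.testCompression(codec));
        }
    }
}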

Example 4: deserializeFromWritable

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Deserialize the file trailer as writable data.
 * @param input the data input to read the trailer fields from
 * @throws IOException
 */
void deserializeFromWritable(DataInput input) throws IOException {
  fileInfoOffset = input.readLong();
  loadOnOpenDataOffset = input.readLong();
  dataIndexCount = input.readInt();
  uncompressedDataIndexSize = input.readLong();
  metaIndexCount = input.readInt();

  totalUncompressedBytes = input.readLong();
  entryCount = input.readLong();
  compressionCodec = Compression.Algorithm.values()[input.readInt()];
  numDataIndexLevels = input.readInt();
  firstDataBlockOffset = input.readLong();
  lastDataBlockOffset = input.readLong();
  // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
  // some compat code here.
  setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
      MAX_COMPARATOR_NAME_LENGTH)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source file: FixedFileTrailer.java
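
Note that the codec is recovered by ordinal (Compression.Algorithm.values()[input.readInt()]), which ties the on-disk trailer format to the enum's declaration order. A minimal sketch of the write side under that assumption (the actual FixedFileTrailer serializer may differ):

import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hbase.io.compress.Compression;

class CodecOrdinalSketch {
    // Hypothetical counterpart to the read above: persist the codec as its ordinal.
    // New algorithms must only ever be appended to Compression.Algorithm, or old
    // files would decode to the wrong codec.
    static void writeCodec(DataOutput output, Compression.Algorithm codec) throws IOException {
        output.writeInt(codec.ordinal());
    }
}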

Example 5: generateColumnDescriptors

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source file: HBaseTestingUtility.java
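
A short usage sketch, assuming an HBase 1.x test classpath (the hbase-server test jar that provides HBaseTestingUtility): build a table descriptor with one family per compression/encoding/bloom combination; "combo_table" is a hypothetical name:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class ComboTableSketch {
    public static void main(String[] args) {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("combo_table"));
        // One family per compression x encoding x bloom combination.
        for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors("cf")) {
            htd.addFamily(hcd);
        }
        System.out.println("families: " + htd.getFamilies().size());
    }
}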

Example 6: testSerializeDeserializeFamilyCompressionMap

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table, Configuration)}
 * and {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(conf, table.getTableDescriptor());

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source file: TestHFileOutputFormat2.java

Example 7: getMockColumnFamiliesForCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression
    = new HashMap<String, Compression.Algorithm>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("[email protected]#[email protected]#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    // same key as above, so with numCfs >= 3 this GZ entry overwrites the SNAPPY one
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source file: TestHFileOutputFormat2.java

Example 8: testSerializeDeserializeFamilyCompressionMap

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Test for {@link HFileOutputFormat#configureCompression(org.apache.hadoop.hbase.client.Table, Configuration)}
 * and {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source file: TestHFileOutputFormat.java

Example 9: getMockColumnFamiliesForCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("[email protected]#[email protected]#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    // same key as above, so with numCfs >= 3 this GZ entry overwrites the SNAPPY one
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source file: TestHFileOutputFormat.java

Example 10: parseColumnFamilyOptions

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source file: LoadTestTool.java
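
Note the two lookup styles for compression codecs: Compression.Algorithm.valueOf expects the exact enum constant name (e.g. "GZ"), while Compression.getCompressionAlgorithmByName, used in Examples 2 and 3, expects the lower-case configuration name (e.g. "gz"). A small sketch of the difference:

import org.apache.hadoop.hbase.io.compress.Compression;

public class CodecNameLookup {
    public static void main(String[] args) {
        Compression.Algorithm byEnum = Compression.Algorithm.valueOf("GZ");             // enum constant name
        Compression.Algorithm byName = Compression.getCompressionAlgorithmByName("gz"); // configuration name
        System.out.println(byEnum == byName); // true: both resolve to GZ
    }
}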

Example 11: testCreateWriter

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source file: TestStore.java

Example 12: createTestV2Block

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: TestHFileBlock.java

Example 13: assertBuffersEqual

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: TestHFileBlock.java

Example 14: testNullMetaBlocks

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existent", false));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source file: TestHFile.java

Example 15: seekToTheKey

import org.apache.hadoop.hbase.io.compress.Compression; // import the required package/class
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, KeyValue toSeek)
    throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<DataBlockEncoder.EncodedSeeker>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null || encoding == DataBlockEncoding.PREFIX_TREE) {
      continue;
    }

    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
        .withIncludesMvcc(false).withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE).build();
    HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
        encodingContext);
    DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
        encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(encodedBuffer);
    encodedSeekers.add(seeker);
  }
  // test it!
  // try a few random seeks
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source file: TestSeekToBlockWithEncoders.java


Note: The org.apache.hadoop.hbase.io.compress.Compression class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce this article without permission.