Java BloomType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.StoreFile.BloomType. If you are wondering what exactly BloomType does, how to use it, or where to find working examples, the hand-picked code samples below should help.


The BloomType class belongs to the org.apache.hadoop.hbase.regionserver.StoreFile package. Fifteen code examples of the BloomType class are presented below, sorted by popularity by default.
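
Before the examples, here is a minimal hedged sketch of the most common use of BloomType: enabling a row-level bloom filter on a column family so that point reads can skip store files that cannot contain the queried row. The table and family names are placeholders; the builder-style API matches the 0.94-era examples below.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Placeholder table/family names; a sketch, not a definitive recipe.
HTableDescriptor table = new HTableDescriptor(Bytes.toBytes("demo_table"));
HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("cf"))
    .setBloomFilterType(BloomType.ROW);   // ROW, ROWCOL, or NONE
table.addFamily(family);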

Example 1: HFileSortedOplogWriter

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
public HFileSortedOplogWriter() throws IOException {
      writer = HFile.getWriterFactory(hconf, hcache)
          .withPath(fs, path)
          .withBlockSize(sopConfig.getBlockSize())
          .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
          .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//          .withComparator(sopConfig.getComparator())
          .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
          .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
          .create();
      
      bfw = sopConfig.isBloomFilterEnabled() ?
//          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//              0, writer, sopConfig.getComparator())
          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
              0, writer)
          : null;
    }
 
Developer: gemxd | Project: gemfirexd-oss | Lines: 19 | Source file: HFileSortedOplog.java

Example 2: HFileSortedOplogWriter

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
public HFileSortedOplogWriter(int keys) throws IOException {
      try {
        int hfileBlockSize = Integer.getInteger(
            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
            HoplogConfig.COMPRESSION_DEFAULT));

//        ByteComparator bc = new ByteComparator();
        writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(hfileBlockSize)
//            .withComparator(bc)
            .withCompression(compress)
            .create();
        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
            writer);

        logger.fine("Created hoplog writer with compression " + compress);
      } catch (IOException e) {
        logger.fine("IO Error while creating writer");
        throw e;
      }
    }
 
Developer: gemxd | Project: gemfirexd-oss | Lines: 25 | Source file: HFileSortedOplog.java
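
Since this writer reads its block size and compression from JVM system properties (via Integer.getInteger and System.getProperty above), a caller configures them before construction. A small hedged sketch; the property-name constants come from HoplogConfig in the source project, and the value must name a Compression.Algorithm enum constant:

// Illustrative setup only (property names from HoplogConfig in the source project):
// configure block size and compression before constructing HFileSortedOplogWriter.
System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF, String.valueOf(1 << 16));
System.setProperty(HoplogConfig.COMPRESSION, "GZ"); // must name a Compression.Algorithm constant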

Example 3: colDescFromThrift

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 30 | Source file: ThriftUtilities.java
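
As context, a hedged sketch of a caller: the Thrift-generated ColumnDescriptor exposes its fields publicly, so the converter's input can be assembled directly. The field values below are placeholders, and IllegalArgument is the Thrift-generated exception declared above.

import java.nio.ByteBuffer;

// Hypothetical caller of colDescFromThrift (placeholder values):
ColumnDescriptor in = new ColumnDescriptor();
in.name = ByteBuffer.wrap(Bytes.toBytes("cf:"));
in.maxVersions = 3;
in.compression = "NONE";           // resolved via getCompressionAlgorithmByName
in.inMemory = false;
in.blockCacheEnabled = true;
in.timeToLive = Integer.MAX_VALUE;
in.bloomFilterType = "ROW";        // parsed by BloomType.valueOf above
HColumnDescriptor col = colDescFromThrift(in);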

Example 4: initHRegion

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 28 | Source file: TestBlocksRead.java

Example 5: testBloomFilter

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 19 | Source file: TestStoreFile.java
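
bloomWriteRead handles verification inside the test suite; purely as a hedged sketch (assuming the 0.94-era StoreFile constructor and reader API), the written file could be reopened with the same bloom type like this:

// A sketch under assumed 0.94 APIs; not part of the original test.
StoreFile sf = new StoreFile(fs, f, conf, cacheConf,
    StoreFile.BloomType.ROW, DataBlockEncoding.NONE);
StoreFile.Reader reader = sf.createReader();
System.out.println("entries: " + reader.getEntries());
sf.closeReader(true); // evict cached blocks on close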

Example 6: colDescFromThrift

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName,
      in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
      in.timeToLive, bt.toString());
  return col;
}
 
Developer: lifeng5042 | Project: RStore | Lines: 26 | Source file: ThriftUtilities.java

Example 7: createTable

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Attempts to create the table used by this tool with the fixed configuration details
 *
 * @param admin The configured administration used to perform this operation
 */
private void createTable(final HBaseAdmin admin) {
    final String tableName = appConfig.getToolTable();

    try {
        if( !admin.tableExists(tableName) ) {
            HTableDescriptor tableDesc = new HTableDescriptor(tableName.getBytes(Charsets.UTF_8));

            HColumnDescriptor colDesc = new HColumnDescriptor(ConfigConstants.COLUMN_FAMILY);
            colDesc.setBlockCacheEnabled(true).setBlocksize(65536)
                    .setBloomFilterType(BloomType.ROW)
                    .setCompressionType(Algorithm.SNAPPY)
                    .setDataBlockEncoding(DataBlockEncoding.PREFIX)
                    .setMaxVersions(1);

            tableDesc.addFamily(colDesc);

            admin.createTable(tableDesc);
            log.info("Created table: " + tableName);
        } else {
            log.debug("Table already exists, creation skipped");
        }
    } catch (IOException e) {
        log.error("Error occurred during table creation", e);
    }
}
 
Developer: altamiracorp | Project: hbase-benchmark | Lines: 31 | Source file: BenchmarkDriver.java
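
A hedged note on invocation: createTable is private here, so it is driven from inside the tool after an HBaseAdmin is built from the cluster configuration. A sketch of that wiring, with everything outside this snippet treated as an assumption:

// Illustrative wiring only; in the source project this happens inside the tool.
Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
HBaseAdmin admin = new HBaseAdmin(conf);          // 0.94-era constructor
try {
    createTable(admin);  // the private method shown above
} finally {
    admin.close();
}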

Example 8: createTable

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {

  if (admin.tableExists(tableName)) {
    return false;
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(tableName);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);

  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  columnDescriptor.setMaxVersions(10);
  tableDescriptor.addFamily(columnDescriptor);

  tableDescriptor.setMaxFileSize(regionMaxSize);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());

  tableDescriptor.setDeferredLogFlush(true);

  regionCount = (short) Math.abs(regionCount);

  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;

  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    // Assign through the array index: the original enhanced-for loop only
    // overwrote its local loop variable, leaving every split key null.
    splitKeys[i] = Bytes.toBytes(key);
    System.out.println(" - Split: " + key);
  }
  admin.createTable(tableDescriptor, splitKeys); // actually create the pre-split table
  return true;
}
 
Developer: amitchmca | Project: hadooparchitecturebook | Lines: 40 | Source file: App.java

Example 9: getBloomFilterType

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * @return bloom filter type used for new StoreFiles in ColumnFamily
 */
public StoreFile.BloomType getBloomFilterType() {
  String n = getValue(BLOOMFILTER);
  if (n == null) {
    n = DEFAULT_BLOOMFILTER;
  }
  return StoreFile.BloomType.valueOf(n.toUpperCase());
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 11 | Source file: HColumnDescriptor.java
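
For symmetry, a sketch of the matching setter (a reconstruction, not verbatim source): the BLOOMFILTER value is stored as the enum's string form, which is exactly what the getter above parses back with valueOf.

/** Reconstructed counterpart setter; stores the enum name the getter parses back. */
public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) {
  return setValue(BLOOMFILTER, bt.toString());
}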

Example 10: testSimpleLoad

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Test case that creates some regions and loads
 * HFiles that fit snugly inside those regions
 */
@Test
public void testSimpleLoad() throws Exception {
  runTest("testSimpleLoad", BloomType.NONE,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
        new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
  });
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 13 | Source file: TestLoadIncrementalHFiles.java

Example 11: testRegionCrossingLoad

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Test case that creates some regions and loads
 * HFiles that cross the boundaries of those regions
 */
@Test
public void testRegionCrossingLoad() throws Exception {
  runTest("testRegionCrossingLoad", BloomType.NONE,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 13 | Source file: TestLoadIncrementalHFiles.java

Example 12: testRegionCrossingRowBloom

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Test loading into a column family that has a ROW bloom filter.
 */
@Test
public void testRegionCrossingRowBloom() throws Exception {
  runTest("testRegionCrossingLoadRowBloom", BloomType.ROW,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 12 | Source file: TestLoadIncrementalHFiles.java

Example 13: testRegionCrossingRowColBloom

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Test loading into a column family that has a ROWCOL bloom filter.
 */
@Test
public void testRegionCrossingRowColBloom() throws Exception {
  runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
      new byte[][][] {
        new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
        new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") },
  });
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 12 | Source file: TestLoadIncrementalHFiles.java

Example 14: runTest

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
private void runTest(String testName, BloomType bloomType, 
        byte[][][] hfileRanges) throws Exception {
  Path dir = util.getDataTestDir(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
        + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
  }
  int expectedRows = hfileIdx * 1000;

  final byte[] TABLE = Bytes.toBytes("mytable_"+testName);

  HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setBloomFilterType(bloomType);
  htd.addFamily(familyDesc);
  admin.createTable(htd, SPLIT_KEYS);

  HTable table = new HTable(util.getConfiguration(), TABLE);
  util.waitTableAvailable(TABLE, 30000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  loader.doBulkLoad(dir, table);

  assertEquals(expectedRows, util.countRows(table));
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 34 | Source file: TestLoadIncrementalHFiles.java

Example 15: testDeleteRowWithBloomFilter

import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; // import the required package/class
/**
 * Testcase to cover the bug-fix for HBASE-2823.
 * Ensures a correct delete when issuing a delete-row
 * on columns with bloom filter set to row+col (BloomType.ROWCOL).
 */
public void testDeleteRowWithBloomFilter() throws IOException {
  byte [] tableName = Bytes.toBytes("testDeleteRowWithBloomFilter");
  byte [] familyName = Bytes.toBytes("familyName");

  // Create Table
  HColumnDescriptor hcd = new HColumnDescriptor(familyName)
      .setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);

  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + "TestDeleteRowWithBloomFilter");
  this.region = HRegion.createHRegion(info, path, conf, htd);
  try {
    // Insert some data
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
    region.put(put);
    region.flushcache();

    Delete del = new Delete(row);
    region.delete(del, null, true);
    region.flushcache();

    // Get remaining rows (should have none)
    Get get = new Get(row);
    get.addColumn(familyName, col);

    KeyValue[] keyValues = region.get(get, null).raw();
    assertTrue(keyValues.length == 0);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 45 | Source file: TestHRegion.java


Note: The org.apache.hadoop.hbase.regionserver.StoreFile.BloomType examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, so consult each project's license before using or redistributing the code. Do not repost without permission.