Java BloomType Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.BloomType. If you are wondering what BloomType is for, how to use it, or want concrete examples, the curated class examples below should help.


The BloomType class belongs to the org.apache.hadoop.hbase.regionserver package. The sections below present 15 code examples of the class, sorted by popularity.
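Before working through the examples, here is a minimal sketch of where BloomType typically appears: choosing a bloom filter policy when defining a column family. This snippet is not taken from any of the projects below; the table and family names are invented for illustration, and it uses the HBase 1.x client API seen throughout this page.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class BloomTypeSketch {
  /**
   * Build a descriptor for a hypothetical table whose single family uses a
   * row-level bloom filter. BloomType.ROW stores one bloom entry per row key,
   * letting reads skip HFiles that cannot contain the requested row;
   * BloomType.ROWCOL keys on row+column and only pays off for point gets of
   * specific columns; BloomType.NONE disables bloom filters for the family.
   */
  public static HTableDescriptor exampleTable() {
    HColumnDescriptor family = new HColumnDescriptor("d");
    family.setBloomFilterType(BloomType.ROW);
    return new HTableDescriptor(TableName.valueOf("example_table")).addFamily(family);
  }
}

The examples below show the same setBloomFilterType(...) call in real projects, along with BloomType.valueOf(...) for parsing user-supplied names and BloomType.values() for enumerating all policies.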

Example 1: postStartMaster

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for the
                                                 // normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Let the "labels" table having only one region always. We are not expecting too many labels in
      // the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
  }
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: VisibilityController.java

Example 2: colDescFromThrift

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * This utility method creates a new HBase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor
 * @throws IllegalArgument
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return col;
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: ThriftUtilities.java
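For orientation, here is a hypothetical call site for the helper above. The field values are invented; the fields themselves (name, maxVersions, compression, bloomFilterType, timeToLive) follow the Thrift-generated ColumnDescriptor struct, and the snippet assumes ThriftUtilities is accessible from the calling package.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.thrift.ThriftUtilities;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
import org.apache.hadoop.hbase.util.Bytes;

public class ColDescFromThriftDemo {
  public static void main(String[] args) throws IllegalArgument {
    ColumnDescriptor in = new ColumnDescriptor();
    in.name = ByteBuffer.wrap(Bytes.toBytes("cf")); // Thrift carries the family name as a ByteBuffer
    in.maxVersions = 3;
    in.compression = "NONE";      // resolved via Compression.getCompressionAlgorithmByName
    in.bloomFilterType = "ROW";   // resolved via BloomType.valueOf
    in.timeToLive = -1;           // non-positive TTL becomes Integer.MAX_VALUE in the helper
    HColumnDescriptor out = ThriftUtilities.colDescFromThrift(in);
    System.out.println(out);
  }
}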

Example 3: createACLTable

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * Create the ACL table
 * @param master
 * @throws IOException
 */
static void createACLTable(MasterServices master) throws IOException {
  master.createTable(new HTableDescriptor(ACL_TABLE_NAME)
    .addFamily(new HColumnDescriptor(ACL_LIST_FAMILY)
      .setMaxVersions(1)
      .setInMemory(true)
      .setBlockCacheEnabled(true)
      .setBlocksize(8 * 1024)
      .setBloomFilterType(BloomType.NONE)
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will
      // be the case if we are using CombinedBlockCache (Bucket Cache).
      .setCacheDataInL1(true)),
  null,
  HConstants.NO_NONCE,
  HConstants.NO_NONCE);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: AccessControlLists.java

Example 4: initRowKeyList

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: LMDIndexDirectStoreFileScanner.java

Example 5: createTable

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * Create a table.
 * @param htd
 * @param families
 * @param splitKeys
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys,
    Configuration c) throws IOException {
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Blooms are on by default as of 0.95, but we disable them here because tests have
    // hard-coded counts of what to expect in the block cache, etc., and blooms being on
    // is interfering.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd, splitKeys);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
  // assigned
  waitUntilAllRegionsAssigned(htd.getTableName());
  return (HTable) getConnection().getTable(htd.getTableName());
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HBaseTestingUtility.java

Example 6: generateColumnDescriptors

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * Create a set of column descriptors covering the combinations of compression,
 * encoding, and bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%s@#&-%d@#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: HBaseTestingUtility.java

Example 7: getMockColumnFamiliesForBloomType

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * @return a map from column family names to bloom filter types for
 *         testing column family bloom settings. Column family names have special characters
 */
private Map<String, BloomType> getMockColumnFamiliesForBloomType(int numCfs) {
  Map<String, BloomType> familyToBloomType =
      new HashMap<String, BloomType>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToBloomType.put("[email protected]#[email protected]#&", BloomType.ROW);
  }
  if (numCfs-- > 0) {
    familyToBloomType.put("Family2=asdads&!AASD",
        BloomType.ROWCOL);
  }
  if (numCfs-- > 0) {
    familyToBloomType.put("Family3", BloomType.NONE);
  }
  return familyToBloomType;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHFileOutputFormat2.java

Example 8: testSplitALot

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
@Test
public void testSplitALot() throws Exception {
  runTest("testSplitALot", BloomType.NONE,
    new byte[][] {
      Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"),
      Bytes.toBytes("eee"), Bytes.toBytes("fff"),
      Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
      Bytes.toBytes("iii"), Bytes.toBytes("lll"),
      Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"),
      Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
      Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
      Bytes.toBytes("zzz"),
    },
    new byte[][][] {
      new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") },
    }
  );
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestLoadIncrementalHFiles.java

Example 9: runTest

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestLoadIncrementalHFiles.java

Example 10: parseColumnFamilyOptions

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: LoadTestTool.java
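A note on the parsing above: BloomType.valueOf expects the exact enum constant name ("NONE", "ROW", or "ROWCOL") and throws IllegalArgumentException for anything else. Below is a small hypothetical helper (not part of LoadTestTool) that tolerates lower-case input and falls back to the same ROW default:

import org.apache.hadoop.hbase.regionserver.BloomType;

public class BloomOptionParser {
  /** Parse a user-supplied bloom option leniently; null falls back to ROW like LoadTestTool. */
  static BloomType parseBloom(String option) {
    if (option == null) {
      return BloomType.ROW;
    }
    return BloomType.valueOf(option.toUpperCase()); // accept "rowcol" as well as "ROWCOL"
  }

  public static void main(String[] args) {
    System.out.println(parseBloom("rowcol")); // ROWCOL
    System.out.println(parseBloom(null));     // ROW
  }
}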

Example 11: metaTableDescriptor

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
public static HTableDescriptor metaTableDescriptor(final Configuration conf)
    throws IOException {
  HTableDescriptor metaDescriptor = new HTableDescriptor(
    TableName.META_TABLE_NAME,
    new HColumnDescriptor[] {
      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
          HConstants.DEFAULT_HBASE_META_VERSIONS))
        .setInMemory(true)
        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
          HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
        .setBloomFilterType(BloomType.NONE)
       });
  metaDescriptor.addCoprocessor(
    "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
    null, Coprocessor.PRIORITY_SYSTEM, null);
  return metaDescriptor;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: HTableDescriptor.java

Example 12: createTableIfNotExists

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
private void createTableIfNotExists() throws AtlasException {
    Admin admin = null;
    try {
        admin = connection.getAdmin();
        LOG.info("Checking if table {} exists", tableName.getNameAsString());
        if (!admin.tableExists(tableName)) {
            LOG.info("Creating table {}", tableName.getNameAsString());
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
            columnFamily.setMaxVersions(1);
            columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            columnFamily.setCompressionType(Compression.Algorithm.GZ);
            columnFamily.setBloomFilterType(BloomType.ROW);
            tableDescriptor.addFamily(columnFamily);
            admin.createTable(tableDescriptor);
        } else {
            LOG.info("Table {} exists", tableName.getNameAsString());
        }
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        close(admin);
    }
}
 
Developer: apache, Project: incubator-atlas, Lines: 25, Source: HBaseBasedAuditRepository.java

Example 13: ignoredOptionsAreIgnored

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
@Test
public void ignoredOptionsAreIgnored() {
  // We're really checking to make certain we don't trigger an exception for an ignored option:
  descriptor.setCompressionType(Compression.Algorithm.LZ4);
  descriptor.setCompactionCompressionType(Compression.Algorithm.LZ4);
  descriptor.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  descriptor.setBlockCacheEnabled(false);
  descriptor.setCacheDataOnWrite(true);
  descriptor.setCacheDataInL1(true);
  descriptor.setEvictBlocksOnClose(false);
  descriptor.setBloomFilterType(BloomType.ROW);
  descriptor.setPrefetchBlocksOnOpen(true);
  descriptor.setBlocksize(16 * 1024);
  descriptor.setScope(1); // REPLICATION_SCOPE
  descriptor.setInMemory(true);

  ColumnFamily.Builder result = adapter.adapt(descriptor)
      .clearName()
      .clearGcExpression();

  Assert.assertArrayEquals(
      new byte[0],
      result.build().toByteArray());
}
 
Developer: dmmcerlean, Project: cloud-bigtable-client, Lines: 25, Source: TestColumnDescriptorAdapter.java

Example 14: postStartMaster

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels; no need for the
                                                 // normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Let the "labels" table having only one region always. We are not expecting too many labels in
      // the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null);
    }
  }
 
Developer: grokcoder, Project: pbase, Lines: 23, Source: VisibilityController.java

Example 15: createTable

import org.apache.hadoop.hbase.regionserver.BloomType; // import the required package/class
/**
 * Create a table.
 * @param htd
 * @param families
 * @param c Configuration to use
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
throws IOException {
  for(byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Blooms are on by default as of 0.95, but we disable them here because tests have
    // hard-coded counts of what to expect in the block cache, etc., and blooms being on
    // is interfering.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned
  waitUntilAllRegionsAssigned(htd.getTableName());
  return (HTable)getConnection().getTable(htd.getTableName());
}
 
Developer: grokcoder, Project: pbase, Lines: 24, Source: HBaseTestingUtility.java


Note: The org.apache.hadoop.hbase.regionserver.BloomType class examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright in the source code belongs to the original authors; consult the corresponding project's License before redistributing or using the code. Do not reproduce without permission.