

Java HTableDescriptor.getColumnFamilies Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.getColumnFamilies. If you are wondering how exactly HTableDescriptor.getColumnFamilies is used, or what it is for, the curated code examples here may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The following presents 11 code examples of the HTableDescriptor.getColumnFamilies method, sorted by popularity by default.
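Before diving into the examples, here is a minimal, self-contained sketch of typical getColumnFamilies usage against the HBase 1.x client API used throughout this page. The table name "mytable" and the connection setup are illustrative assumptions, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class GetColumnFamiliesSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // "mytable" is a placeholder table name for this sketch.
      HTableDescriptor table = admin.getTableDescriptor(TableName.valueOf("mytable"));
      // getColumnFamilies() returns one HColumnDescriptor per column family.
      for (HColumnDescriptor family : table.getColumnFamilies()) {
        System.out.println(family.getNameAsString()
            + " maxVersions=" + family.getMaxVersions()
            + " replicationScope=" + family.getScope());
      }
    }
  }
}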

Example 1: testEnableReplicationWhenTableDescriptorIsNotSameInClusters

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 300000)
public void testEnableReplicationWhenTableDescriptorIsNotSameInClusters() throws Exception {
  HTableDescriptor table = admin2.getTableDescriptor(tableName);
  HColumnDescriptor f = new HColumnDescriptor("newFamily");
  table.addFamily(f);
  admin2.disableTable(tableName);
  admin2.modifyTable(tableName, table);
  admin2.enableTable(tableName);

  try {
    adminExt.enableTableRep(tableName);
    fail("Exception should be thrown if table descriptors in the clusters are not same.");
  } catch (RuntimeException ignored) {

  }
  admin1.disableTable(tableName);
  admin1.modifyTable(tableName, table);
  admin1.enableTable(tableName);
  adminExt.enableTableRep(tableName);
  table = admin1.getTableDescriptor(tableName);
  for (HColumnDescriptor fam : table.getColumnFamilies()) {
    assertEquals(HConstants.REPLICATION_SCOPE_GLOBAL, fam.getScope());
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 25 | Source: TestReplicationAdminWithClusters.java

Example 2: perform

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  LOG.debug("Performing action: Changing encodings on " + tableName);
  // possible DataBlockEncoding id's
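  // (assumed mapping, per the HBase 1.x DataBlockEncoding enum: 0=NONE, 2=PREFIX, 3=DIFF, 4=FAST_DIFF, 6=PREFIX_TREE)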
  int[] possibleIds = {0, 2, 3, 4, 6};
  for (HColumnDescriptor descriptor : columnDescriptors) {
    short id = (short) possibleIds[random.nextInt(possibleIds.length)];
    descriptor.setDataBlockEncoding(DataBlockEncoding.getEncodingById(id));
    LOG.debug("Set encoding of column family " + descriptor.getNameAsString()
      + " to: " + descriptor.getDataBlockEncoding());
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 26 | Source: ChangeEncodingAction.java

Example 3: perform

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  int versions = random.nextInt(3) + 1;
  for (HColumnDescriptor descriptor : columnDescriptors) {
    descriptor.setVersions(versions, versions);
  }
  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  LOG.debug("Performing action: Changing versions on " + tableName.getNameAsString());
  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 21 | Source: ChangeVersionsAction.java

Example 4: perform

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
    return;
  }

  int index = random.nextInt(columnDescriptors.length);
  while (protectedColumns != null &&
        protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
    index = random.nextInt(columnDescriptors.length);
  }
  byte[] colDescName = columnDescriptors[index].getName();
  LOG.debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from "
      + tableName.getNameAsString());
  tableDescriptor.removeFamily(colDescName);

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 26 | Source: RemoveColumnAction.java

Example 5: setInfoFamilyCachingForMeta

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Enable or disable in-memory caching for hbase:meta, depending on the flag {@code b}
 */
public static void setInfoFamilyCachingForMeta(final HTableDescriptor metaDescriptor,
    final boolean b) {
  for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
    if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
      hcd.setBlockCacheEnabled(b);
      hcd.setInMemory(b);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 13 | Source: MasterFileSystem.java

Example 6: checkCompression

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void checkCompression(final HTableDescriptor htd)
throws IOException {
  if (!this.masterCheckCompression) return;
  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    checkCompression(hcd);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 8 | Source: HMaster.java

Example 7: checkEncryption

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
throws IOException {
  if (!this.masterCheckEncryption) return;
  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    checkEncryption(conf, hcd);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 8 | Source: HMaster.java

Example 8: perform

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  Random random = new Random();
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getHBaseAdmin();

  LOG.info("Performing action: Change bloom filter on all columns of table "
      + tableName);
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  final BloomType[] bloomArray = BloomType.values();
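  // BloomType.values() in HBase 1.x is assumed to be {NONE, ROW, ROWCOL}.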
  final int bloomArraySize = bloomArray.length;

  for (HColumnDescriptor descriptor : columnDescriptors) {
    int bloomFilterIndex = random.nextInt(bloomArraySize);
    LOG.debug("Performing action: About to set bloom filter type to "
        + bloomArray[bloomFilterIndex] + " on column "
        + descriptor.getNameAsString() + " of table " + tableName);
    descriptor.setBloomFilterType(bloomArray[bloomFilterIndex]);
    LOG.debug("Performing action: Just set bloom filter type to "
        + bloomArray[bloomFilterIndex] + " on column "
        + descriptor.getNameAsString() + " of table " + tableName);
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 36 | Source: ChangeBloomFilterAction.java

Example 9: listReplicated

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Find all column families that are replicated from this cluster
 * @return the full list of the replicated column families of this cluster as:
 *        tableName, family name, replicationType
 *
 * Currently replicationType is Global. In the future, more replication
 * types may be extended here. For example
 *  1) the replication may only apply to selected peers instead of all peers
 *  2) the replicationType may indicate the host Cluster servers as Slave
 *     for the table:columnFam.
 */
public List<HashMap<String, String>> listReplicated() throws IOException {
  List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();

  Admin admin = connection.getAdmin();
  HTableDescriptor[] tables;
  try {
    tables = admin.listTables();
  } finally {
    if (admin != null) admin.close();
  }

  for (HTableDescriptor table : tables) {
    HColumnDescriptor[] columns = table.getColumnFamilies();
    String tableName = table.getNameAsString();
    for (HColumnDescriptor column : columns) {
      if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
        // At this moment, the column family is replicated to all peers
        HashMap<String, String> replicationEntry = new HashMap<String, String>();
        replicationEntry.put(TNAME, tableName);
        replicationEntry.put(CFNAME, column.getNameAsString());
        replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
        replicationColFams.add(replicationEntry);
      }
    }
  }

  return replicationColFams;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 40 | Source: ReplicationAdmin.java
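As a consumption sketch for the list returned above: the ReplicationAdmin(Configuration) constructor and the generic iteration below are assumptions consistent with the HBase 1.x client API; each returned map carries the table name, column family name, and replication type under the TNAME, CFNAME, and REPLICATIONTYPE constants referenced in the method.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class ListReplicatedSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
      List<HashMap<String, String>> replicated = replicationAdmin.listReplicated();
      for (HashMap<String, String> entry : replicated) {
        // Each map holds table name, column family name, and replication
        // type (currently always GLOBAL) keyed by ReplicationAdmin's constants.
        System.out.println(entry);
      }
    }
  }
}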

Example 10: checkCompactionPolicy

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd)
    throws IOException {
  // FIFO compaction has some requirements
  // Actually FCP ignores periodic major compactions
  String className =
      htd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
  if (className == null) {
    className =
        conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          ExploringCompactionPolicy.class.getName());
  }

  int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
  String sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
  if (sv != null) {
    blockingFileCount = Integer.parseInt(sv);
  } else {
    blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
  }

  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    String compactionPolicy =
        hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (compactionPolicy == null) {
      compactionPolicy = className;
    }
    if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
      continue;
    }
    // FIFOCompaction
    String message = null;

    // 1. Check TTL
    if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
      message = "Default TTL is not supported for FIFO compaction";
      throw new IOException(message);
    }

    // 2. Check min versions
    if (hcd.getMinVersions() > 0) {
      message = "MIN_VERSION > 0 is not supported for FIFO compaction";
      throw new IOException(message);
    }

    // 3. blocking file count
    String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sbfc != null) {
      blockingFileCount = Integer.parseInt(sbfc);
    }
    if (blockingFileCount < 1000) {
      message =
          "blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount
              + " is below recommended minimum of 1000";
      throw new IOException(message);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 58 | Source: HMaster.java
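For orientation, here is a sketch of a table definition that should pass the three FIFO-compaction checks above, reusing the same configuration keys the method reads; the table name "events", family "d", and the one-day TTL are placeholders, not values from the source.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;

public class FifoCompactionTableSketch {
  public static HTableDescriptor newFifoTable() {
    // "events" and "d" are placeholder names for this sketch.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("events"));
    HColumnDescriptor hcd = new HColumnDescriptor("d");
    hcd.setTimeToLive(24 * 60 * 60); // check 1: a non-default TTL is required
    hcd.setMinVersions(0);           // check 2: MIN_VERSIONS must stay 0
    htd.addFamily(hcd);
    htd.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName());
    // check 3: the blocking store file count must be at least 1000
    htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000");
    return htd;
  }
}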

Example 11: perform

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  // Possible compression algorithms. If an algorithm is not supported,
  // modifyTable will fail, so there is no harm.
  Algorithm[] possibleAlgos = Algorithm.values();
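  // Algorithm.values() typically includes NONE, GZ, LZO, SNAPPY, LZ4; actual support depends on native libraries.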

  // Since not every compression algorithm is supported,
  // let's use the same algorithm for all column families.

  // If an unsupported compression algorithm is chosen, pick a different one.
  // This is to work around the issue that modifyTable() does not throw remote
  // exception.
  Algorithm algo;
  do {
    algo = possibleAlgos[random.nextInt(possibleAlgos.length)];

    try {
      Compressor c = algo.getCompressor();

      // call returnCompressor() to release the Compressor
      algo.returnCompressor(c);
      break;
    } catch (Throwable t) {
      LOG.info("Performing action: Changing compression algorithms to " + algo +
              " is not supported, pick another one");
    }
  } while (true);

  LOG.debug("Performing action: Changing compression algorithms on "
    + tableName.getNameAsString() + " to " + algo);
  for (HColumnDescriptor descriptor : columnDescriptors) {
    if (random.nextBoolean()) {
      descriptor.setCompactionCompressionType(algo);
    } else {
      descriptor.setCompressionType(algo);
    }
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }

  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 53 | Source: ChangeCompressionAction.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.getColumnFamilies method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors. For distribution and use, consult the license of the corresponding project; do not repost without permission.