當前位置: 首頁>>代碼示例>>Java>>正文


Java HColumnDescriptor.setMaxVersions方法代碼示例

本文整理匯總了Java中org.apache.hadoop.hbase.HColumnDescriptor.setMaxVersions方法的典型用法代碼示例。如果您正苦於以下問題:Java HColumnDescriptor.setMaxVersions方法的具體用法?Java HColumnDescriptor.setMaxVersions怎麽用?Java HColumnDescriptor.setMaxVersions使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.hbase.HColumnDescriptor的用法示例。


在下文中一共展示了HColumnDescriptor.setMaxVersions方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: createTable

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates the namespaced table with two column families.
 * Each family keeps up to 3 versions per cell, and the table is created
 * with a region replication factor of 3.
 *
 * @throws Exception if the table cannot be created on the cluster
 */
public void createTable() throws Exception {

		HColumnDescriptor family1 = new HColumnDescriptor(firstFamily);
		HColumnDescriptor family2 = new HColumnDescriptor(secondFamily);
		family1.setMaxVersions(3);
		family2.setMaxVersions(3);

		HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(nameSpaceName + ":" + tableName));
		descriptor.addFamily(family1);
		descriptor.addFamily(family2);
		descriptor.setRegionReplication(3); // keep 3 replicas of each region
		admin.createTable(descriptor);
	}
 
開發者ID:husky00,項目名稱:worm,代碼行數:26,代碼來源:StoreToHbase.java

示例2: addColumn

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Adds a column family to an existing table.
 *
 * @param tableName  name of the table to modify
 * @param familyName name of the column family to add
 */
public void addColumn(String tableName, String familyName) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Admin admin = hBaseConfiguration.admin();
    TableName tb = TableName.valueOf(tableName);
    try {
        if (admin.tableExists(tb)) {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);

            columnDescriptor.setMaxVersions(1);// keep at most 1 version per cell in this family
            columnDescriptor.setCompressionType(Compression.Algorithm.GZ);// compression algorithm for the family
            columnDescriptor.setCompactionCompressionType(Compression.Algorithm.GZ);// compression algorithm used during compaction

            admin.addColumn(tb, columnDescriptor);
        } else {
            log.info("表名【" + tableName + "】不存在");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        // always release the wrapper's underlying connection
        hBaseConfiguration.close();
    }
}
 
開發者ID:mumuhadoop,項目名稱:mumu-hbase,代碼行數:29,代碼來源:HBaseTableOperation.java

示例3: createWriteTable

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates the canary write table, pre-split into roughly
 * {@code regionsLowerLimit} regions per live region server.
 *
 * @param numberOfServers number of live region servers
 * @throws IOException if the table cannot be created
 */
private void createWriteTable(int numberOfServers) throws IOException {
  int numberOfRegions = (int)(numberOfServers * regionsLowerLimit);
  // Fixed log-message typo: "current  lower limi" -> "current lower limit".
  LOG.info("Number of live regionservers: " + numberOfServers + ", "
      + "pre-splitting the canary table into " + numberOfRegions + " regions "
      + "(current lower limit of regions per server is " + regionsLowerLimit
      + " and you can change it by config: "
      + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY + " )");
  HTableDescriptor desc = new HTableDescriptor(writeTableName);
  HColumnDescriptor family = new HColumnDescriptor(CANARY_TABLE_FAMILY_NAME);
  family.setMaxVersions(1); // canary only ever needs the latest write
  family.setTimeToLive(writeDataTTL); // expire canary rows automatically

  desc.addFamily(family);
  byte[][] splits = new RegionSplitter.HexStringSplit().split(numberOfRegions);
  admin.createTable(desc, splits);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:17,代碼來源:Canary.java

示例4: testModifyColumn

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
@Test (timeout=180000)
public void testModifyColumn() throws Exception {
  // Column descriptor carrying the modification whose authorization we test.
  final HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
  hcd.setMaxVersions(10);
  // Action that invokes the access controller's preModifyColumn hook directly.
  AccessTestAction action = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER.preModifyColumn(ObserverContext.createAndPrepare(CP_ENV, null),
          TEST_TABLE, hcd);
      return null;
    }
  };

  // Admin/create-level users (and matching groups) may modify the family;
  // plain read/write users must be denied.
  verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_ADMIN_CF,
    USER_GROUP_CREATE, USER_GROUP_ADMIN);
  verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:18,代碼來源:TestAccessController.java

示例5: setUp

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Builds a fresh test region containing both column families (each keeping
 * up to 3 versions) and loads the fixture data before every test.
 */
@Before
public void setUp() throws Exception {
  testVals = makeTestVals();

  final HTableDescriptor tableDesc =
      new HTableDescriptor(TableName.valueOf(this.getClass().getSimpleName()));
  for (byte[] family : new byte[][] { FAMILIES[0], FAMILIES[1] }) {
    HColumnDescriptor colDesc = new HColumnDescriptor(family);
    colDesc.setMaxVersions(3);
    tableDesc.addFamily(colDesc);
  }
  HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(regionInfo, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), tableDesc);
  addData();
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:17,代碼來源:TestDependentColumnFilter.java

示例6: setupTables

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Prepares the two tables used by the test: the MTTR action table and the
 * LoadTestTool table. Any pre-existing tables with the same names are dropped
 * first so every run starts from a clean slate.
 */
private static void setupTables() throws IOException {
  // Resolve table names, honoring configuration overrides.
  tableName = TableName.valueOf(util.getConfiguration()
      .get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));

  loadTableName = TableName.valueOf(util.getConfiguration()
      .get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));

  // Drop leftovers from earlier runs.
  for (TableName name : new TableName[] { tableName, loadTableName }) {
    if (util.getHBaseAdmin().tableExists(name)) {
      util.deleteTable(name);
    }
  }

  // Create the action table; a failure here fails the whole test.
  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  // Huge max file size so no region splits happen mid-test.
  tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
  HColumnDescriptor familyDescriptor = new HColumnDescriptor(FAMILY);
  familyDescriptor.setMaxVersions(1);
  tableDescriptor.addFamily(familyDescriptor);
  util.getHBaseAdmin().createTable(tableDescriptor);

  // Let LoadTestTool create and initialize its own table.
  int exitCode = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"});
  assertEquals("Failed to initialize LoadTestTool", 0, exitCode);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:32,代碼來源:IntegrationTestMTTR.java

示例7: getDesc

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/** Descriptor for this table: a single NAME_FAMILY column family capped at one version. */
@Override
public HTableDescriptor getDesc() {
    final HColumnDescriptor family = new HColumnDescriptor(NAME_FAMILY);
    family.setMaxVersions(1);
    final HTableDescriptor tableDesc = new HTableDescriptor(getName());
    tableDesc.addFamily(family);
    return tableDesc;
}
 
開發者ID:gchq,項目名稱:stroom-stats,代碼行數:9,代碼來源:HBaseUniqueIdReverseMapTable.java

示例8: getDesc

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/** Descriptor for this table: a single ID_FAMILY column family capped at one version. */
@Override
public HTableDescriptor getDesc() {
    final HColumnDescriptor idFamily = new HColumnDescriptor(ID_FAMILY);
    idFamily.setMaxVersions(1);
    final HTableDescriptor tableDescriptor = new HTableDescriptor(getName());
    tableDescriptor.addFamily(idFamily);
    return tableDescriptor;
}
 
開發者ID:gchq,項目名稱:stroom-stats,代碼行數:9,代碼來源:HBaseUniqueIdForwardMapTable.java

示例9: createTable

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates a pre-split table (5 regions between keys 0 and 120) with one
 * family retaining up to 10 versions, and returns an open handle to it.
 */
private Table createTable(TableName tableName) throws IOException {
  HColumnDescriptor family = new HColumnDescriptor(FAMILY1);
  // 10 versions is plenty; no test here goes beyond that.
  family.setMaxVersions(10);
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(family);
  TEST_UTIL.getHBaseAdmin().createTable(tableDesc, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  return new HTable(TEST_UTIL.getConfiguration(), tableName);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:10,代碼來源:TestBulkDeleteProtocol.java

示例10: setUp

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates the test table (registering USER_OWNER in the _acl_ table) and
 * waits for it to come online before each test.
 */
@Before
public void setUp() throws Exception {
  Admin admin = TEST_UTIL.getHBaseAdmin();

  HColumnDescriptor family = new HColumnDescriptor(TEST_FAMILY);
  family.setMaxVersions(4);

  HTableDescriptor tableDesc = new HTableDescriptor(TEST_TABLE.getTableName());
  tableDesc.setOwner(USER_OWNER);
  tableDesc.addFamily(family);

  admin.createTable(tableDesc, new byte[][] { Bytes.toBytes("s") });
  TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName());
  LOG.info("Sleeping a second because of HBASE-12581");
  Threads.sleep(1000);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:15,代碼來源:TestCellACLs.java

示例11: createTestTable

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/** Creates a test table pre-split at "s" with one family holding up to 100 versions. */
private void createTestTable(TableName tname) throws Exception {
  HColumnDescriptor family = new HColumnDescriptor(TEST_FAMILY);
  family.setMaxVersions(100);
  HTableDescriptor tableDesc = new HTableDescriptor(tname);
  tableDesc.addFamily(family);
  tableDesc.setOwner(USER_OWNER);
  createTable(TEST_UTIL, tableDesc, new byte[][]{Bytes.toBytes("s")});
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:9,代碼來源:TestAccessController.java

示例12: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates a table whose families each carry one non-default setting
 * (max versions, bloom filter type, data block encoding, block size) plus
 * custom table-level value/configuration entries, then records the resulting
 * descriptor and its string form for later comparison.
 *
 * Fixes vs. the original: the redundant second assignment of
 * {@code originalTableName} is removed, and the HTable handle is managed with
 * try-with-resources so it is closed even if descriptor retrieval throws.
 *
 * @throws Exception if table creation or descriptor retrieval fails
 */
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  // Unique name per run so repeated executions don't collide.
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // One family per non-default property under test.
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(originalTableName);
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  // Close the handle even if getTableDescriptor throws (original leaked it).
  try (Table original = new HTable(UTIL.getConfiguration(), originalTableName)) {
    originalTableDescriptor = admin.getTableDescriptor(originalTableName);
    originalTableDescription = originalTableDescriptor.toStringCustomizedValues();
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:34,代碼來源:TestSnapshotMetadata.java

示例13: getTableDesc

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Builds a table descriptor with one column family per given name.
 * Each family retains effectively unlimited versions.
 */
private HTableDescriptor getTableDesc(TableName tableName, byte[]... families) {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Keep all versions. (The original comment said "three versions" but the
    // code sets Integer.MAX_VALUE — corrected here to match the code.)
    hcd.setMaxVersions(Integer.MAX_VALUE);
    htd.addFamily(hcd);
  }
  return htd;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:11,代碼來源:TestStoreFileRefresherChore.java

示例14: createTable

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/** Creates a table with a single "col" family keeping up to 3 versions per cell. */
private static void createTable(Admin admin, TableName tableName) throws IOException {
  HColumnDescriptor family = new HColumnDescriptor("col");
  family.setMaxVersions(3);
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(family);
  admin.createTable(tableDesc);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:8,代碼來源:TestScannerWithBulkload.java

示例15: create

import org.apache.hadoop.hbase.HColumnDescriptor; //導入方法依賴的package包/類
/**
 * Creates a table with one GZ-compressed, single-version column family per
 * given family name. An already-existing table is treated as success.
 */
private static void create(Admin admin, TableName tableName, byte[]... families)
    throws IOException {
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    HColumnDescriptor familyDesc = new HColumnDescriptor(family);
    familyDesc.setMaxVersions(1);
    familyDesc.setCompressionType(Algorithm.GZ);
    tableDesc.addFamily(familyDesc);
  }
  try {
    admin.createTable(tableDesc);
  } catch (TableExistsException ignored) {
    // Table already present — acceptable for this setup helper.
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:16,代碼來源:TestSCVFWithMiniCluster.java


注:本文中的org.apache.hadoop.hbase.HColumnDescriptor.setMaxVersions方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。