

Java HColumnDescriptor Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.HColumnDescriptor. If you have been wondering what HColumnDescriptor is for, how to use it, or where to find real-world examples of it, the curated class examples below may help.


The HColumnDescriptor class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
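Before diving into the examples, here is a minimal sketch of the pattern most of them share: configure one or more HColumnDescriptor instances, attach them to an HTableDescriptor, and create the table through Admin. This sketch assumes an HBase 1.x classpath and a reachable cluster configured via hbase-site.xml; the table and family names are placeholders chosen for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HColumnDescriptorSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Configure a column family: name, retained versions, block size.
            HColumnDescriptor family = new HColumnDescriptor("cf"); // "cf" is a placeholder family name
            family.setMaxVersions(3);       // keep up to three versions of each cell
            family.setBlocksize(64 * 1024); // 64 KB HFile blocks

            // Attach the family to a table descriptor and create the table.
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo_table"));
            table.addFamily(family);
            if (!admin.tableExists(table.getTableName())) {
                admin.createTable(table);
            }
        }
    }
}

Note that HColumnDescriptor and HTableDescriptor are the HBase 1.x API; HBase 2.x deprecates them in favor of ColumnFamilyDescriptorBuilder and TableDescriptorBuilder.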

Example 1: createTable

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
public void createTable() throws Exception {
    HColumnDescriptor family1 = new HColumnDescriptor(firstFamily);
    HColumnDescriptor family2 = new HColumnDescriptor(secondFamily);
    family1.setMaxVersions(3);
    family2.setMaxVersions(3);

    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(nameSpaceName + ":" + tableName));
    descriptor.addFamily(family1);
    descriptor.addFamily(family2);
    descriptor.setRegionReplication(3); // keep three replicas of each region
    admin.createTable(descriptor);
    // Optional manual pre-splits, left commented out in the original source:
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("10"));
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("20"));
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("30"));
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("40"));
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("50"));
    // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("60"));
}
 
Developer: husky00, Project: worm, Lines: 26, Source: StoreToHbase.java

Example 2: creatTable

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * create table
 */
public static void creatTable(String tableName, String[] familys)
        throws Exception {
    Admin admin = connection.getAdmin();
    // Check existence first to avoid a TableExistsException on create.
    if (admin.tableExists(TableName.valueOf(tableName))) {
        System.out.println("table already exists!");
    } else {
        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
        for (String family : familys) {
            tableDesc.addFamily(new HColumnDescriptor(family));
        }
        admin.createTable(tableDesc);
        System.out.println("create table " + tableName + " ok.");
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 19, Source: HBaseTest.java

Example 3: setUp

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
@Before
public void setUp() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(
      TableName.valueOf(TABLE_NAME_BYTES));
  htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  Put put = new Put(ROW_BYTES);
  for (int i = 0; i < 10; i += 2) {
    // puts 0, 2, 4, 6 and 8
    put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
        Bytes.toBytes(VALUE_PREFIX + i));
  }
  this.region.put(put);
  this.region.flush(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestInvocationRecordFilter.java

Example 4: addColumn

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * Add a column family to an existing table.
 *
 * @param tableName  table name
 * @param familyName column family name
 */
public void addColumn(String tableName, String familyName) {
    // Note: HBaseConfiguration here appears to be a project-local wrapper
    // (the stock org.apache.hadoop.hbase.HBaseConfiguration has no admin()/close()).
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Admin admin = hBaseConfiguration.admin();
    TableName tb = TableName.valueOf(tableName);
    try {
        if (admin.tableExists(tb)) {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);

            columnDescriptor.setMaxVersions(1); // keep at most one version per cell
            columnDescriptor.setCompressionType(Compression.Algorithm.GZ); // compression algorithm
            columnDescriptor.setCompactionCompressionType(Compression.Algorithm.GZ); // compression used during compactions

            admin.addColumn(tb, columnDescriptor);
        } else {
            log.info("Table [" + tableName + "] does not exist");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        hBaseConfiguration.close();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 29, Source: HBaseTableOperation.java

Example 5: setUpBeforeClass

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Make block sizes small.
  conf = TEST_UTIL.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.replication", 1);
  TEST_UTIL.startMiniDFSCluster(1);

  conf = TEST_UTIL.getConfiguration();
  fs = TEST_UTIL.getDFSCluster().getFileSystem();

  hbaseDir = TEST_UTIL.createRootDir();
  
  logDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);

  htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(family));
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestWALRecordReader.java

Example 6: CacheConfig

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * Create a cache configuration using the specified configuration object and
 * family descriptor.
 * @param conf hbase configuration
 * @param family column family configuration
 */
public CacheConfig(Configuration conf, HColumnDescriptor family) {
  this(CacheConfig.instantiateBlockCache(conf),
      family.isBlockCacheEnabled(),
      family.isInMemory(),
      // For the following flags we enable them regardless of per-schema settings
      // if they are enabled in the global configuration.
      conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
      conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
      conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
      conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
          DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
      conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
          DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
      conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
   );
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: CacheConfig.java
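As the constructor above shows, each write-time and prefetch flag is enabled if it is set either in the global configuration or on the column family, since the two are combined with a logical OR. A minimal sketch of how a caller might exercise this, assuming an HBase 1.x classpath (the family name is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

Configuration conf = HBaseConfiguration.create();
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); // global: cache data blocks on write
HColumnDescriptor family = new HColumnDescriptor("cf");
family.setPrefetchBlocksOnOpen(true); // per-family: prefetch blocks when a store file opens
CacheConfig cacheConf = new CacheConfig(conf, family); // both flags end up enabled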

Example 7: createTableAndGetOneRegion

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
HRegionInfo createTableAndGetOneRegion(
    final TableName tableName) throws IOException, InterruptedException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY));
  admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);

  // wait till the table is assigned
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  long timeoutTime = System.currentTimeMillis() + 1000;
  while (true) {
    List<HRegionInfo> regions = master.getAssignmentManager().
      getRegionStates().getRegionsOfTable(tableName);
    if (regions.size() > 3) {
      return regions.get(2);
    }
    long now = System.currentTimeMillis();
    if (now > timeoutTime) {
      fail("Could not find an online region");
    }
    Thread.sleep(10);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestAssignmentManagerOnCluster.java

Example 8: initHRegion

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestJoinedScanners.java

Example 9: testModifyTable

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
@Test
public void testModifyTable() throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Create a table with one family
  HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
  baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
  admin.createTable(baseHtd);
  admin.disableTable(TABLE_NAME);
  try {
    // Verify the table descriptor
    verifyTableDescriptor(TABLE_NAME, FAMILY_0);

    // Modify the table adding another family and verify the descriptor
    HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME);
    modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0));
    modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1));
    admin.modifyTable(TABLE_NAME, modifiedHtd);
    verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
  } finally {
    admin.deleteTable(TABLE_NAME);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestTableDescriptorModification.java

Example 10: testCreateTableWithEmptyRowInTheSplitKeys

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
@Test (timeout=300000)
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException{
  byte[] tableName = Bytes.toBytes("testCreateTableWithEmptyRowInTheSplitKeys");
  byte[][] splitKeys = new byte[3][];
  splitKeys[0] = "region1".getBytes();
  splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
  splitKeys[2] = "region2".getBytes();
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor("col"));
  try {
    admin.createTable(desc, splitKeys);
    fail("Test case should fail as empty split key is passed.");
  } catch (IllegalArgumentException e) {
    LOG.info("Expected ", e);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestAdmin1.java

Example 11: addColumn

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * Add a column to an existing table.
 * Asynchronous operation.
 *
 * @param tableName name of the table to add column to
 * @param column column descriptor of column to be added
 * @throws IOException if a remote or network exception occurs
 */
@Override
public void addColumn(final TableName tableName, final HColumnDescriptor column)
throws IOException {
  executeCallable(new MasterCallable<Void>(getConnection()) {
    @Override
    public Void call(int callTimeout) throws ServiceException {
      PayloadCarryingRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      controller.setPriority(tableName);
      AddColumnRequest req = RequestConverter.buildAddColumnRequest(
        tableName, column, ng.getNonceGroup(), ng.newNonce());
      master.addColumn(controller,req);
      return null;
    }
  });
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: HBaseAdmin.java
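A hedged usage sketch for the method above, as seen from the Admin interface a client would hold (the table and family names are placeholders; depending on the cluster's hbase.online.schema.update.enable setting, the table may need to be disabled before altering its schema):

Admin admin = connection.getAdmin();
TableName tn = TableName.valueOf("demo_table");
admin.disableTable(tn); // may be required unless online schema updates are enabled
admin.addColumn(tn, new HColumnDescriptor("new_cf"));
admin.enableTable(tn);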

Example 12: testConstraintPasses

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * Test that we run a passing constraint
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test
public void testConstraintPasses() throws Exception {
  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // add a constraint
  Constraints.add(desc, CheckWasRunConstraint.class);

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    // test that we don't fail on a valid put
    Put put = new Put(row1);
    byte[] value = Integer.toString(10).getBytes();
    put.add(dummy, new byte[0], value);
    table.put(put);
  } finally {
    table.close();
  }
  assertTrue(CheckWasRunConstraint.wasRun);
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestConstraint.java

Example 13: testSplitStoreFileWithDifferentEncoding

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setDataBlockEncoding(cfEncoding);
  HFileTestUtil.createHFileWithDataBlockEncoding(
      util.getConfiguration(), fs, testIn, bulkloadEncoding,
      FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestLoadIncrementalHFiles.java

Example 14: initHRegion

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestBlocksRead.java

Example 15: createTable

import org.apache.hadoop.hbase.HColumnDescriptor; // import the required package/class
private static void createTable() {
  assertNotNull("HBaseAdmin is not initialized successfully.", admin);
  if (admin != null) {

    HTableDescriptor desc = new HTableDescriptor(name);
    HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1"));
    desc.addFamily(coldef);

    try {
      admin.createTable(desc);
      assertTrue("Fail to create the table", admin.tableExists(name));
    } catch (IOException e) {
      assertNull("Exception found while creating table", e);
    }

  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestFilterWrapper.java


Note: the org.apache.hadoop.hbase.HColumnDescriptor class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.