

Java HTableDescriptor.addFamily Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.addFamily. If you are wondering what exactly HTableDescriptor.addFamily does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The following presents 15 code examples of HTableDescriptor.addFamily, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
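Before the examples, here is a minimal, self-contained sketch of the typical addFamily workflow: build an HTableDescriptor, attach one or more HColumnDescriptor instances (one per column family), and hand the descriptor to an Admin to create the table. The table name "demo_table" and family name "cf" are illustrative placeholders, not taken from any of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "demo_table" and "cf" are placeholder names for illustration.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setMaxVersions(3);  // keep up to three versions of each cell
    htd.addFamily(hcd);     // register the column family on the descriptor
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(htd);  // the descriptor, families included, defines the new table
    }
  }
}

Note that HTableDescriptor and HColumnDescriptor are the classic (pre-2.0) HBase API used throughout the examples below; in HBase 2.x they are deprecated in favor of TableDescriptorBuilder and ColumnFamilyDescriptorBuilder.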

Example 1: setUp

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  testVals = makeTestVals();

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.getClass().getSimpleName()));
  HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]);
  hcd0.setMaxVersions(3);
  htd.addFamily(hcd0);
  HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
  addData();
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestDependentColumnFilter.java

Example 2: runTestFromCommandLine

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  Admin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: IntegrationTestWithCellVisibilityLoadAndVerify.java

Example 3: createCCTTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
protected HTableDescriptor createCCTTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor =
      new HTableDescriptor(IndexUtils.getCCTName(indexSpec.getTableName()));
  System.out.println("winter new cct table name: " + indexTableDescriptor.getTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      // column is f, the only family
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: IndexTableDescriptor.java

Example 4: testIsUnloaded

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Check to make sure a constraint is unloaded when it fails
 * @throws Exception
 */
@Test
public void testIsUnloaded() throws Exception {
  // create the table
  HTableDescriptor desc = new HTableDescriptor(tableName);
  // add a family to the table
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // make sure that constraints are unloaded
  Constraints.add(desc, RuntimeFailConstraint.class);
  // add a constraint to check to see if is run
  Constraints.add(desc, CheckWasRunConstraint.class);
  CheckWasRunConstraint.wasRun = false;

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);

  // test that we do fail on violation
  Put put = new Put(row1);
  put.add(dummy, new byte[0], "pass".getBytes());

  try {
    table.put(put);
    fail("RuntimeFailConstraint wasn't triggered - this put shouldn't work!");
  } catch (Exception e) {
    // NOOP
  }

  // try the put again, this time constraints are not used, so it works
  table.put(put);
  // and we make sure that constraints were not run...
  assertFalse(CheckWasRunConstraint.wasRun);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestConstraint.java

Example 5: testOpenFailed

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Tests the case where a region fails to open
 */
@Test (timeout=60000)
public void testOpenFailed() throws Exception {
  String table = "testOpenFailed";
  try {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    MyLoadBalancer.controledRegion = hri.getEncodedName();

    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    assertFalse(am.waitForAssignment(hri));

    RegionState state = am.getRegionStates().getRegionState(hri);
    assertEquals(RegionState.State.FAILED_OPEN, state.getState());
    // Failed to open since no plan, so it's on no server
    assertNull(state.getServerName());

    MyLoadBalancer.controledRegion = null;
    master.assignRegion(hri);
    assertTrue(am.waitForAssignment(hri));

    ServerName serverName = master.getAssignmentManager().
      getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);
  } finally {
    MyLoadBalancer.controledRegion = null;
    TEST_UTIL.deleteTable(Bytes.toBytes(table));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestAssignmentManagerOnCluster.java

Example 6: testScannerSelection

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (scanner.next(results)) {
    // drain the scanner; the "aaa"-"aaz" key range matches no rows
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestScannerSelectionUsingKeyRange.java

Example 7: testCreateTableWithOnlyEmptyStartRow

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
  byte[] tableName = Bytes.toBytes("testCreateTableWithOnlyEmptyStartRow");
  byte[][] splitKeys = new byte[1][];
  splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor("col"));
  try {
    admin.createTable(desc, splitKeys);
    fail("Test case should fail as empty split key is passed.");
  } catch (IllegalArgumentException e) {
    // expected: an empty split key is rejected
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestAdmin1.java

Example 8: testAssignRegion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * This tests region assignment
 */
@Test (timeout=60000)
public void testAssignRegion() throws Exception {
  String table = "testAssignRegion";
  try {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    am.waitForAssignment(hri);

    RegionStates regionStates = am.getRegionStates();
    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);

    // Region is assigned now. Let's assign it again.
    // Master should not abort, and region should be assigned.
    RegionState oldState = regionStates.getRegionState(hri);
    TEST_UTIL.getHBaseAdmin().assign(hri.getRegionName());
    master.getAssignmentManager().waitForAssignment(hri);
    RegionState newState = regionStates.getRegionState(hri);
    assertTrue(newState.isOpened()
      && newState.getStamp() != oldState.getStamp());
  } finally {
    TEST_UTIL.deleteTable(Bytes.toBytes(table));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestAssignmentManagerOnCluster.java

Example 9: testDisableCatalogTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testDisableCatalogTable() throws Exception {
  try {
    this.admin.disableTable(TableName.META_TABLE_NAME);
    fail("Expected to throw ConstraintException");
  } catch (ConstraintException e) {
    // expected: hbase:meta cannot be disabled
  }
  // Before the fix for HBASE-6146, the table creation below would fail because the
  // hbase:meta table was actually getting disabled by the disableTable() call.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testDisableCatalogTable".getBytes()));
  HColumnDescriptor hcd = new HColumnDescriptor("cf1".getBytes());
  htd.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(htd);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestAdmin2.java

Example 10: setupMockColumnFamiliesForDataBlockEncoding

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void setupMockColumnFamiliesForDataBlockEncoding(Table table,
    Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setDataBlockEncoding(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestHFileOutputFormat2.java

Example 11: run

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void run() {
  // create a table: the master coprocessor will throw an exception and not
  // catch it.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_TABLE));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  try {
    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(htd);
    fail("BuggyMasterObserver failed to throw an exception.");
  } catch (IOException e) {
    assertEquals("HBaseAdmin threw an interrupted IOException as expected.",
        e.getClass().getName(), "java.io.InterruptedIOException");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: TestMasterCoprocessorExceptionWithAbort.java

Example 12: testRollbackAndDoubleExecutionAfterPONR

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
  final String familyToAddName = "cf2";
  final String familyToRemove = "cf1";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, familyToRemove);
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyToAddName));
  htd.removeFamily(familyToRemove.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT we should not trigger the rollback.
  // NOTE: the 5 (number of MODIFY_TABLE_DELETE_FS_LAYOUT + 1 step) is hardcoded,
  //       so you have to look at this test at least once when you add a new step.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // "cf2" should be added and "cf1" should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, false, familyToAddName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestModifyTableProcedure.java

Example 13: setUpBeforeClass

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  SUPERUSER = User.createUserForTesting(conf, "admin",
      new String[] { "supergroup" });
  conf = TEST_UTIL.getConfiguration();
  conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
      SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
  conf.setInt("hfile.format.version", 3);
  conf.set("hbase.superuser", SUPERUSER.getShortName());
  conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
  conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  // Wait for the labels table to become available
  TEST_UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
  createLabels();
  setAuths();
  REST_TEST_UTIL.startServletContainer(conf);
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
      ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(CFA));
  htd.addFamily(new HColumnDescriptor(CFB));
  admin.createTable(htd);
  insertData(TABLE, COLUMN_1, 1.0);
  insertData(TABLE, COLUMN_2, 0.5);
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestScannersWithLabels.java

Example 14: generateHBaseDatasetFloatOB

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 15: setUp

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  // Set up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestDefaultCompactSelection.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.addFamily examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.