

Java HTableDescriptor.addFamily Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.addFamily. If you have been wondering what exactly HTableDescriptor.addFamily does, how to use it, or where to find examples of it, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


Below are 15 code examples of the HTableDescriptor.addFamily method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
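
Before diving into the examples, here is a minimal, self-contained sketch of the typical addFamily workflow: build a table descriptor, attach one or more column families, and create the table through Admin. It is illustrative rather than taken from any project below; the table name "demo_table" and family name "cf" are placeholders, and the API shown is the classic (HBase 1.x era) one used throughout this article.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Describe the table; "demo_table" is a placeholder name.
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));

      // Configure per-family settings on the HColumnDescriptor first,
      // then attach it to the table schema with addFamily.
      HColumnDescriptor cf = new HColumnDescriptor("cf");
      cf.setMaxVersions(3);
      htd.addFamily(cf);

      if (!admin.tableExists(htd.getTableName())) {
        admin.createTable(htd);
      }
    }
  }
}

As the examples below show, per-family settings such as max versions or data block encoding are configured on the HColumnDescriptor before it is passed to addFamily.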

Example 1: setUp

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  testVals = makeTestVals();

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.getClass().getSimpleName()));
  HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]);
  hcd0.setMaxVersions(3);
  htd.addFamily(hcd0);
  HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]);
  hcd1.setMaxVersions(3);
  htd.addFamily(hcd1);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd);
  addData();
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestDependentColumnFilter.java

Example 2: runTestFromCommandLine

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  Admin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: IntegrationTestWithCellVisibilityLoadAndVerify.java

Example 3: createCCTTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
protected HTableDescriptor createCCTTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor =
      new HTableDescriptor(IndexUtils.getCCTName(indexSpec.getTableName()));
  System.out.println("winter new cct table name: " + indexTableDescriptor.getTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      // column is f, the only family
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: IndexTableDescriptor.java

Example 4: testIsUnloaded

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Check to make sure a constraint is unloaded when it fails
 * @throws Exception
 */
@Test
public void testIsUnloaded() throws Exception {
  // create the table
  HTableDescriptor desc = new HTableDescriptor(tableName);
  // add the column families to the table
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // add a constraint that always fails at runtime, so it should get unloaded
  Constraints.add(desc, RuntimeFailConstraint.class);
  // add a constraint to check whether it was run
  Constraints.add(desc, CheckWasRunConstraint.class);
  CheckWasRunConstraint.wasRun = false;

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);

  // test that we do fail on violation
  Put put = new Put(row1);
  put.add(dummy, new byte[0], "pass".getBytes());
  
  try {
    table.put(put);
    fail("RuntimeFailConstraint wasn't triggered - this put shouldn't work!");
  } catch (Exception e) {
    // NOOP
  }

  // try the put again, this time constraints are not used, so it works
  table.put(put);
  // and we make sure that constraints were not run...
  assertFalse(CheckWasRunConstraint.wasRun);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestConstraint.java

Example 5: testOpenFailed

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * This tests region open failed
 */
@Test (timeout=60000)
public void testOpenFailed() throws Exception {
  String table = "testOpenFailed";
  try {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    MyLoadBalancer.controledRegion = hri.getEncodedName();

    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    assertFalse(am.waitForAssignment(hri));

    RegionState state = am.getRegionStates().getRegionState(hri);
    assertEquals(RegionState.State.FAILED_OPEN, state.getState());
    // Failed to open since no plan, so it's on no server
    assertNull(state.getServerName());

    MyLoadBalancer.controledRegion = null;
    master.assignRegion(hri);
    assertTrue(am.waitForAssignment(hri));

    ServerName serverName = master.getAssignmentManager().
      getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);
  } finally {
    MyLoadBalancer.controledRegion = null;
    TEST_UTIL.deleteTable(Bytes.toBytes(table));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestAssignmentManagerOnCluster.java

Example 6: testScannerSelection

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestScannerSelectionUsingKeyRange.java

Example 7: testCreateTableWithOnlyEmptyStartRow

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
  byte[] tableName = Bytes.toBytes("testCreateTableWithOnlyEmptyStartRow");
  byte[][] splitKeys = new byte[1][];
  splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor("col"));
  try {
    admin.createTable(desc, splitKeys);
    fail("Test case should fail as empty split key is passed.");
  } catch (IllegalArgumentException e) {
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestAdmin1.java

Example 8: testAssignRegion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * This tests region assignment
 */
@Test (timeout=60000)
public void testAssignRegion() throws Exception {
  String table = "testAssignRegion";
  try {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    am.waitForAssignment(hri);

    RegionStates regionStates = am.getRegionStates();
    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);

    // Region is assigned now. Let's assign it again.
    // Master should not abort, and region should be assigned.
    RegionState oldState = regionStates.getRegionState(hri);
    TEST_UTIL.getHBaseAdmin().assign(hri.getRegionName());
    master.getAssignmentManager().waitForAssignment(hri);
    RegionState newState = regionStates.getRegionState(hri);
    assertTrue(newState.isOpened()
      && newState.getStamp() != oldState.getStamp());
  } finally {
    TEST_UTIL.deleteTable(Bytes.toBytes(table));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestAssignmentManagerOnCluster.java

Example 9: testDisableCatalogTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testDisableCatalogTable() throws Exception {
  try {
    this.admin.disableTable(TableName.META_TABLE_NAME);
    fail("Expected to throw ConstraintException");
  } catch (ConstraintException e) {
  }
  // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
  // actually getting disabled by the disableTable() call.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testDisableCatalogTable".getBytes()));
  HColumnDescriptor hcd = new HColumnDescriptor("cf1".getBytes());
  htd.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(htd);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestAdmin2.java

Example 10: setupMockColumnFamiliesForDataBlockEncoding

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void setupMockColumnFamiliesForDataBlockEncoding(Table table,
    Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setDataBlockEncoding(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestHFileOutputFormat2.java

Example 11: run

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void run() {
  // create a table: the master coprocessor will throw an exception and not
  // catch it.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_TABLE));
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
  try {
    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(htd);
    fail("BuggyMasterObserver failed to throw an exception.");
  } catch (IOException e) {
    assertEquals("HBaseAdmin threw an interrupted IOException as expected.",
        e.getClass().getName(), "java.io.InterruptedIOException");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: TestMasterCoprocessorExceptionWithAbort.java

Example 12: testRollbackAndDoubleExecutionAfterPONR

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
  final String familyToAddName = "cf2";
  final String familyToRemove = "cf1";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, familyToRemove);
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyToAddName));
  htd.removeFamily(familyToRemove.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT we should not trigger the rollback.
  // NOTE: the 5 (number of MODIFY_TABLE_DELETE_FS_LAYOUT + 1 step) is hardcoded,
  //       so you have to look at this test at least once when you add a new step.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // "cf2" should be added and "cf1" should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, false, familyToAddName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestModifyTableProcedure.java

Example 13: setUpBeforeClass

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  SUPERUSER = User.createUserForTesting(conf, "admin",
      new String[] { "supergroup" });
  conf = TEST_UTIL.getConfiguration();
  conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
      SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
  conf.setInt("hfile.format.version", 3);
  conf.set("hbase.superuser", SUPERUSER.getShortName());
  conf.set("hbase.coprocessor.master.classes", VisibilityController.class.getName());
  conf.set("hbase.coprocessor.region.classes", VisibilityController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  // Wait for the labels table to become available
  TEST_UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
  createLabels();
  setAuths();
  REST_TEST_UTIL.startServletContainer(conf);
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
      ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(CFA));
  htd.addFamily(new HColumnDescriptor(CFB));
  admin.createTable(htd);
  insertData(TABLE, COLUMN_1, 1.0);
  insertData(TABLE, COLUMN_2, 0.5);
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestScannersWithLabels.java

Example 14: generateHBaseDatasetFloatOB

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 15: setUp

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  //Setting up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestDefaultCompactSelection.java
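
A closing note for readers on HBase 2.x and later: HTableDescriptor, HColumnDescriptor, and therefore addFamily are deprecated there in favor of a builder-based API. The following is a rough, hand-written equivalent of the pattern above (a sketch, not taken from the projects cited; "demo_table" and "cf" remain placeholder names):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// HBase 2.x sketch: setColumnFamily replaces addFamily.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)
        .build())
    .build();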


Note: The org.apache.hadoop.hbase.HTableDescriptor.addFamily method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For redistribution and use, refer to each project's license. Do not reproduce without permission.