

Java HTableDescriptor.setValue Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setValue. If you have been wondering what HTableDescriptor.setValue does, how to use it, or what it looks like in practice, the hand-picked code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


A total of 13 code examples of the HTableDescriptor.setValue method are shown below, sorted by popularity by default.
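Before diving into the examples, here is a minimal, self-contained sketch of the method itself. It is only an illustration, not taken from the projects below; the table name "demo_table" and the metadata keys "OWNER" and "MAX_ROWS" are hypothetical:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class SetValueSketch {
  public static void main(String[] args) {
    // Build a descriptor for a hypothetical table.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    // String overload: attach an arbitrary metadata key/value pair to the descriptor.
    htd.setValue("OWNER", "analytics-team"); // hypothetical key and value
    // byte[] overload: the same idea, but accepts binary keys and values.
    htd.setValue(Bytes.toBytes("MAX_ROWS"), Bytes.toBytes("1000000"));
    // Stored values can be read back with getValue.
    System.out.println(htd.getValue("OWNER")); // prints "analytics-team"
  }
}

As the examples below illustrate, two common uses of setValue are selecting a region split policy via the HTableDescriptor.SPLIT_POLICY key and storing application-level metadata directly on the table descriptor.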

Example 1: postStartMaster

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We cache all the labels ourselves; there is no
                                                 // need for the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Always keep the "labels" table to a single region; we are not expecting too many
      // labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
  }
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: VisibilityController.java

Example 2: testCreateTableDescriptorUpdatesIfExistsAlready

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
      "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(htd));
  assertFalse(fstd.createTableDescriptor(htd));
  htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
  assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
  Path tableDir = fstd.getTableDir(htd.getTableName());
  Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
  FileStatus[] statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);

  assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: TestFSTableDescriptors.java

Example 3: createIndexTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public HTableDescriptor createIndexTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor = new HTableDescriptor(indexSpec.getIndexTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Author: fengchen8086, Project: ditb, Lines: 26, Source: IndexTableDescriptor.java

Example 4: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // set up column families with non-default properties
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  Table original = new HTable(UTIL.getConfiguration(), originalTableName);
  originalTableName = TableName.valueOf(sourceTableNameAsString);
  originalTableDescriptor = admin.getTableDescriptor(originalTableName);
  originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

  original.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 34, Source: TestSnapshotMetadata.java

Example 5: testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled()
    throws Exception {
  final TableName tableName = TableName.valueOf("changeTableSchemaOnlineFailure");
  TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
      "hbase.online.schema.update.enable", false);
  HTableDescriptor[] tables = admin.listTables();
  int numTables = tables.length;
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
  tables = this.admin.listTables();
  assertEquals(numTables + 1, tables.length);

  // FIRST, do htabledescriptor changes.
  HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
  // Make a copy and assert copy is good.
  HTableDescriptor copy = new HTableDescriptor(htd);
  assertTrue(htd.equals(copy));
  // Now amend the copy. Introduce differences.
  long newFlushSize = htd.getMemStoreFlushSize() / 2;
  if (newFlushSize <= 0) {
    newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
  }
  copy.setMemStoreFlushSize(newFlushSize);
  final String key = "anyoldkey";
  assertTrue(htd.getValue(key) == null);
  copy.setValue(key, key);
  boolean expectedException = false;
  try {
    admin.modifyTable(tableName, copy);
  } catch (TableNotDisabledException re) {
    expectedException = true;
  }
  assertTrue("Online schema update should not happen.", expectedException);

  // Reset the value for the other tests
  TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
      "hbase.online.schema.update.enable", true);
}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TestAdmin1.java

Example 6: testCustomPolicy

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Test setting up a customized split policy
 */
@Test
public void testCustomPolicy() throws IOException {
  HTableDescriptor myHtd = new HTableDescriptor();
  myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
      KeyPrefixRegionSplitPolicy.class.getName());
  myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));

  HRegion myMockRegion = Mockito.mock(HRegion.class);
  Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
  Mockito.doReturn(stores).when(myMockRegion).getStores();

  HStore mockStore = Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("abcd")).when(mockStore).getSplitPoint();
  stores.add(mockStore);

  KeyPrefixRegionSplitPolicy policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ab", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
  Mockito.doReturn(Bytes.toBytes("efgh")).when(myMockRegion)
      .getExplicitSplitPoint();

  policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ef", Bytes.toString(policy.getSplitPoint()));
}
 
Author: fengchen8086, Project: ditb, Lines: 35, Source: TestRegionSplitPolicy.java

Example 7: testDelimitedKeyPrefixRegionSplitPolicy

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
  HTableDescriptor myHtd = new HTableDescriptor();
  myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
      DelimitedKeyPrefixRegionSplitPolicy.class.getName());
  myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",");

  HRegion myMockRegion = Mockito.mock(HRegion.class);
  Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
  Mockito.doReturn(stores).when(myMockRegion).getStores();

  HStore mockStore = Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("ab,cd")).when(mockStore).getSplitPoint();
  stores.add(mockStore);

  DelimitedKeyPrefixRegionSplitPolicy policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ab", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
  Mockito.doReturn(Bytes.toBytes("efg,h")).when(myMockRegion)
      .getExplicitSplitPoint();

  policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("efg", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(Bytes.toBytes("ijk")).when(myMockRegion)
  .getExplicitSplitPoint();
  assertEquals("ijk", Bytes.toString(policy.getSplitPoint()));
}
 
Author: fengchen8086, Project: ditb, Lines: 36, Source: TestRegionSplitPolicy.java

Example 8: setColumnInfoMap

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Set the data type of each column in this table.
 *
 * @param tableName the table whose column types are being set
 * @param columnTypes a map from column name to its data type
 * @throws IOException if the table is still enabled or the modification fails
 */
public void setColumnInfoMap(TableName tableName, Map<byte[], DataType> columnTypes)
    throws IOException {
  HTableDescriptor desc = admin.getTableDescriptor(tableName);

  if (isTableEnabled(tableName)) {
    throw new IOException("Table " + tableName + " is enabled! Disable it first!");
  }

  StringBuilder sb = new StringBuilder();

  if (columnTypes != null && !columnTypes.isEmpty()) {
    int i = 0;
    for (Map.Entry<byte[], DataType> entry : columnTypes.entrySet()) {
      sb.append(Bytes.toString(entry.getKey()));
      sb.append(":");
      sb.append(entry.getValue().toString());
      if (i != columnTypes.size() - 1) {
        sb.append(",");
      }
      i++;
    }
  }

  desc.setValue("DATA_FORMAT", sb.toString());

  admin.modifyTable(tableName, desc);
  // TODO maybe need to enable and disable, check add indexes
}
 
Author: fengchen8086, Project: ditb, Lines: 35, Source: CCIndexAdmin.java

Example 9: createCCTTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
protected HTableDescriptor createCCTTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor =
      new HTableDescriptor(IndexUtils.getCCTName(indexSpec.getTableName()));
  System.out.println("winter new cct table name: " + indexTableDescriptor.getTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      // column is f, the only family
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Author: fengchen8086, Project: ditb, Lines: 29, Source: IndexTableDescriptor.java

Example 10: writeConstraint

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Write the given key and associated configuration to the
 * {@link HTableDescriptor}
 */
private static void writeConstraint(HTableDescriptor desc, String key,
    Configuration conf) throws IOException {
  // store the key and conf in the descriptor
  desc.setValue(key, serializeConfiguration(conf));
}
 
Author: fengchen8086, Project: ditb, Lines: 10, Source: Constraints.java

Example 11: updateLatestPriority

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private static void updateLatestPriority(HTableDescriptor desc, long priority) {
  // update the max priority
  desc.setValue(COUNTER_KEY, Long.toString(priority));
}
 
Author: fengchen8086, Project: ditb, Lines: 5, Source: Constraints.java

Example 12: testHbckFixOrphanTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=180000)
public void testHbckFixOrphanTable() throws Exception {
  TableName table = TableName.valueOf("tableInfo");
  FileSystem fs = null;
  Path tableinfo = null;
  try {
    setupTable(table);

    Path hbaseTableDir = FSUtils.getTableDir(
        FSUtils.getRootDir(conf), table);
    fs = hbaseTableDir.getFileSystem(conf);
    FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    tableinfo = status.getPath();
    fs.rename(tableinfo, new Path("/.tableinfo"));

    // Expect hbck to report an error because .tableinfo is missing.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

    // fix OrphanTable with default .tableinfo (htd not yet cached on master)
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = null;
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);

    HTableDescriptor htd = admin.getTableDescriptor(table);
    htd.setValue("NOT_DEFAULT", "true");
    admin.disableTable(table);
    admin.modifyTable(table, htd);
    admin.enableTable(table);
    fs.delete(status.getPath(), true);

    // fix OrphanTable with cache
    htd = admin.getTableDescriptor(table); // warms up cached htd on master
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);
    htd = admin.getTableDescriptor(table);
    assertEquals(htd.getValue("NOT_DEFAULT"), "true");
  } finally {
    fs.rename(new Path("/.tableinfo"), tableinfo);
    cleanupTable(table);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 47, Source: TestHBaseFsck.java

Example 13: main

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  htd.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  htd.addFamily(new HColumnDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(htd, startKey, endKey, numRegions);
  } else {
    admin.createTable(htd);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 52, Source: TestPerColumnFamilyFlush.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.setValue method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce this article without permission.