

Java HTableDescriptor.setValue Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setValue. If you are wondering what HTableDescriptor.setValue does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The following presents 13 code examples of the HTableDescriptor.setValue method, sorted by popularity by default.
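Before the numbered examples, here is a minimal sketch of the two most common setValue overloads, one taking a String key/value pair and one taking byte[] arrays, each read back with the matching getValue overload. The table name "demo" and the keys "OWNER" and "MAX_ROWS" are placeholders for illustration, not values used by the examples below.

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class SetValueSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo")); // placeholder table name
    // String overload: attach arbitrary metadata to the table descriptor
    htd.setValue("OWNER", "team-data");
    // byte[] overload: the same idea, with raw bytes
    htd.setValue(Bytes.toBytes("MAX_ROWS"), Bytes.toBytes("100000"));
    // Values are read back with the matching getValue overloads
    System.out.println(htd.getValue("OWNER")); // prints "team-data"
    System.out.println(Bytes.toString(htd.getValue(Bytes.toBytes("MAX_ROWS")))); // prints "100000"
  }
}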

Example 1: postStartMaster

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We cache all the labels ourselves; there is no need
                                                // for the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table at a single region; we do not expect many labels
      // in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
    }
  }
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: VisibilityController.java

Example 2: testCreateTableDescriptorUpdatesIfExistsAlready

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
      "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
  assertTrue(fstd.createTableDescriptor(htd));
  assertFalse(fstd.createTableDescriptor(htd));
  htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
  assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
  Path tableDir = fstd.getTableDir(htd.getTableName());
  Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
  FileStatus[] statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);

  assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: TestFSTableDescriptors.java

Example 3: createIndexTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public HTableDescriptor createIndexTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor = new HTableDescriptor(indexSpec.getIndexTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: IndexTableDescriptor.java

Example 4: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // enable replication on a column family
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  Table original = new HTable(UTIL.getConfiguration(), originalTableName);
  originalTableName = TableName.valueOf(sourceTableNameAsString);
  originalTableDescriptor = admin.getTableDescriptor(originalTableName);
  originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

  original.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 34, Source: TestSnapshotMetadata.java

Example 5: testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=300000)
public void testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled()
    throws Exception {
  final TableName tableName = TableName.valueOf("changeTableSchemaOnlineFailure");
  TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
      "hbase.online.schema.update.enable", false);
  HTableDescriptor[] tables = admin.listTables();
  int numTables = tables.length;
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
  tables = this.admin.listTables();
  assertEquals(numTables + 1, tables.length);

  // FIRST, do htabledescriptor changes.
  HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
  // Make a copy and assert copy is good.
  HTableDescriptor copy = new HTableDescriptor(htd);
  assertTrue(htd.equals(copy));
  // Now amend the copy. Introduce differences.
  long newFlushSize = htd.getMemStoreFlushSize() / 2;
  if (newFlushSize <= 0) {
    newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
  }
  copy.setMemStoreFlushSize(newFlushSize);
  final String key = "anyoldkey";
  assertTrue(htd.getValue(key) == null);
  copy.setValue(key, key);
  boolean expectedException = false;
  try {
    admin.modifyTable(tableName, copy);
  } catch (TableNotDisabledException re) {
    expectedException = true;
  }
  assertTrue("Online schema update should not happen.", expectedException);

  // Reset the value for the other tests
  TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
      "hbase.online.schema.update.enable", true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: TestAdmin1.java

Example 6: testCustomPolicy

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Test setting up a customized split policy
 */
@Test
public void testCustomPolicy() throws IOException {
  HTableDescriptor myHtd = new HTableDescriptor();
  myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
      KeyPrefixRegionSplitPolicy.class.getName());
  myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));

  HRegion myMockRegion = Mockito.mock(HRegion.class);
  Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
  Mockito.doReturn(stores).when(myMockRegion).getStores();

  HStore mockStore = Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("abcd")).when(mockStore).getSplitPoint();
  stores.add(mockStore);

  KeyPrefixRegionSplitPolicy policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ab", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
  Mockito.doReturn(Bytes.toBytes("efgh")).when(myMockRegion)
      .getExplicitSplitPoint();

  policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ef", Bytes.toString(policy.getSplitPoint()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 35, Source: TestRegionSplitPolicy.java

Example 7: testDelimitedKeyPrefixRegionSplitPolicy

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
  HTableDescriptor myHtd = new HTableDescriptor();
  myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
      DelimitedKeyPrefixRegionSplitPolicy.class.getName());
  myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",");

  HRegion myMockRegion = Mockito.mock(HRegion.class);
  Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
  Mockito.doReturn(stores).when(myMockRegion).getStores();

  HStore mockStore = Mockito.mock(HStore.class);
  Mockito.doReturn(2000L).when(mockStore).getSize();
  Mockito.doReturn(true).when(mockStore).canSplit();
  Mockito.doReturn(Bytes.toBytes("ab,cd")).when(mockStore).getSplitPoint();
  stores.add(mockStore);

  DelimitedKeyPrefixRegionSplitPolicy policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("ab", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
  Mockito.doReturn(Bytes.toBytes("efg,h")).when(myMockRegion)
      .getExplicitSplitPoint();

  policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
      .create(myMockRegion, conf);

  assertEquals("efg", Bytes.toString(policy.getSplitPoint()));

  Mockito.doReturn(Bytes.toBytes("ijk")).when(myMockRegion)
      .getExplicitSplitPoint();
  assertEquals("ijk", Bytes.toString(policy.getSplitPoint()));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 36, Source: TestRegionSplitPolicy.java
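Examples 6 and 7 wire their split policies through Mockito mocks; on a live cluster, the same setValue calls are made on the descriptor before the table is created. A minimal sketch under that assumption, with placeholder table and family names:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy;

// Sketch: create a table whose regions split on the first two bytes of the row key.
static void createPrefixSplitTable(Admin admin) throws java.io.IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("prefix_demo")); // placeholder name
  htd.addFamily(new HColumnDescriptor("f")); // placeholder family
  htd.setValue(HTableDescriptor.SPLIT_POLICY, KeyPrefixRegionSplitPolicy.class.getName());
  htd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));
  admin.createTable(htd);
}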

Example 8: setColumnInfoMap

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Set the data type of each column of this table. The table must be disabled first.
 *
 * @param tableName   the table whose descriptor is updated
 * @param columnTypes map from column name to its data type
 * @throws IOException if the table is enabled or the descriptor update fails
 */
public void setColumnInfoMap(TableName tableName, Map<byte[], DataType> columnTypes)
    throws IOException {
  HTableDescriptor desc = admin.getTableDescriptor(tableName);

  if (isTableEnabled(tableName)) {
    throw new IOException("Table " + tableName + " is enabled! Disable it first!");
  }

  StringBuilder sb = new StringBuilder();

  if (columnTypes != null && !columnTypes.isEmpty()) {
    int i = 0;
    for (Map.Entry<byte[], DataType> entry : columnTypes.entrySet()) {
      sb.append(Bytes.toString(entry.getKey()));
      sb.append(":");
      sb.append(entry.getValue().toString());
      if (i != columnTypes.size() - 1) {
        sb.append(",");
      }
      i++;
    }
  }

  desc.setValue("DATA_FORMAT", sb.toString());

  admin.modifyTable(tableName, desc);
  // TODO maybe need to enable and disable, check add indexes
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 35, Source: CCIndexAdmin.java
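A hypothetical read-side counterpart to setColumnInfoMap, sketched for illustration only: it assumes the project's DataType enum can be recovered with valueOf from the strings that toString produced above, which may not match CCIndexAdmin's actual decoding.

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: parse the "column:TYPE,column:TYPE" string written by setColumnInfoMap.
public Map<byte[], DataType> getColumnInfoMap(TableName tableName) throws IOException {
  HTableDescriptor desc = admin.getTableDescriptor(tableName);
  Map<byte[], DataType> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  String raw = desc.getValue("DATA_FORMAT");
  if (raw == null || raw.isEmpty()) {
    return result;
  }
  for (String pair : raw.split(",")) {
    String[] kv = pair.split(":", 2);
    if (kv.length != 2) {
      continue; // skip malformed pairs
    }
    result.put(Bytes.toBytes(kv[0]), DataType.valueOf(kv[1])); // assumes names round-trip
  }
  return result;
}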

Example 9: createCCTTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
protected HTableDescriptor createCCTTableDescriptor(byte[] indexColumn)
    throws IndexNotExistedException {
  IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
  HTableDescriptor indexTableDescriptor =
      new HTableDescriptor(IndexUtils.getCCTName(indexSpec.getTableName()));
  System.out.println("winter new cct table name: " + indexTableDescriptor.getTableName());
  if (indexSpec.getIndexType() == IndexType.CCIndex) {
    for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
      // column is f, the only family
      indexTableDescriptor.addFamily(desc);
    }
  } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
    Set<byte[]> family = indexSpec.getAdditionMap().keySet();
    if (family.size() != 0) {
      for (byte[] name : family) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
      }
    } else {
      indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
  } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
    indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
  }

  indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE,
      Bytes.toBytes(indexSpec.getIndexType().toString())); // record the index type
  return indexTableDescriptor;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: IndexTableDescriptor.java

Example 10: writeConstraint

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Write the given key and associated configuration to the
 * {@link HTableDescriptor}
 */
private static void writeConstraint(HTableDescriptor desc, String key,
    Configuration conf) throws IOException {
  // store the key and conf in the descriptor
  desc.setValue(key, serializeConfiguration(conf));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: Constraints.java

Example 11: updateLatestPriority

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private static void updateLatestPriority(HTableDescriptor desc, long priority) {
  // update the max priority
  desc.setValue(COUNTER_KEY, Long.toString(priority));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 5, Source: Constraints.java
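Examples 10 and 11 only write; reading the stored values back is symmetric through getValue. A hedged sketch, where the counterKey parameter stands in for the private COUNTER_KEY constant in Constraints:

// Sketch: read back the priority stored by updateLatestPriority.
// counterKey is a stand-in for the real COUNTER_KEY constant.
private static long getLatestPriority(HTableDescriptor desc, String counterKey) {
  String stored = desc.getValue(counterKey);
  return stored == null ? 0L : Long.parseLong(stored);
}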

Example 12: testHbckFixOrphanTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=180000)
public void testHbckFixOrphanTable() throws Exception {
  TableName table = TableName.valueOf("tableInfo");
  FileSystem fs = null;
  Path tableinfo = null;
  try {
    setupTable(table);

    Path hbaseTableDir = FSUtils.getTableDir(
        FSUtils.getRootDir(conf), table);
    fs = hbaseTableDir.getFileSystem(conf);
    FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    tableinfo = status.getPath();
    fs.rename(tableinfo, new Path("/.tableinfo"));

    // Expect an error to be reported because .tableinfo is missing.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

    // fix OrphanTable with default .tableinfo (htd not yet cached on master)
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = null;
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);

    HTableDescriptor htd = admin.getTableDescriptor(table);
    htd.setValue("NOT_DEFAULT", "true");
    admin.disableTable(table);
    admin.modifyTable(table, htd);
    admin.enableTable(table);
    fs.delete(status.getPath(), true);

    // fix OrphanTable with cache
    htd = admin.getTableDescriptor(table); // warms up cached htd on master
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);
    htd = admin.getTableDescriptor(table);
    assertEquals(htd.getValue("NOT_DEFAULT"), "true");
  } finally {
    fs.rename(new Path("/.tableinfo"), tableinfo);
    cleanupTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 47, Source: TestHBaseFsck.java

Example 13: main

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  htd.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  htd.addFamily(new HColumnDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(htd, startKey, endKey, numRegions);
  } else {
    admin.createTable(htd);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 52, Source: TestPerColumnFamilyFlush.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.setValue method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use must follow the license of the corresponding project. Do not reproduce without permission.