

Java HTableDescriptor.getFamilies Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.getFamilies. If you have been wondering what HTableDescriptor.getFamilies does, how to call it, or where to find worked examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The 15 code examples of HTableDescriptor.getFamilies below are sorted by popularity.
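Before the project-specific examples, here is a minimal, self-contained sketch of the typical getFamilies() call pattern: fetch a table descriptor through Admin and iterate over its column families. The class and method names are placeholders for illustration.

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ListFamilies {
    // Print the name and a few properties of every column family in a table.
    static void printFamilies(Admin admin, String tableName) throws IOException {
        HTableDescriptor descriptor = admin.getTableDescriptor(TableName.valueOf(tableName));
        Collection<HColumnDescriptor> families = descriptor.getFamilies();
        for (HColumnDescriptor family : families) {
            System.out.println(family.getNameAsString()
                + " maxVersions=" + family.getMaxVersions()
                + " ttl=" + family.getTimeToLive());
        }
    }
}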

Example 1: testCreateTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
@Test
public void testCreateTable() throws IOException {
    clean();

    String tableName = tableNamePrefix + "0";
    HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
    admin.createTable(descriptor);

    // check name
    descriptor = admin.getTableDescriptor(TableName.valueOf(tableName));
    assertEquals(tableName, descriptor.getTableName().getNameAsString());

    // check maxVersion and TimeToLive
    Collection<HColumnDescriptor> columnDescriptors = descriptor.getFamilies();
    assertEquals(1, columnDescriptors.size());
    HColumnDescriptor columnDescriptor = columnDescriptors.iterator().next();
    assertEquals(1, columnDescriptor.getMaxVersions());
    assertEquals(Integer.MAX_VALUE, columnDescriptor.getTimeToLive());
    assertTrue(columnDescriptor.isBlockCacheEnabled());

    // check family name
    ColumnMapping columnMapping = new ColumnMapping(tableName, admin.getConfiguration());
    assertEquals(Bytes.toString(columnMapping.getFamilyNameBytes()), Bytes.toString(columnDescriptor.getName()));
}
 
Developer ID: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 25, Source: TestAdmin.java

Example 2: configureCompression

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Serialize the column family to compression algorithm map into the configuration.
 * Invoked while configuring the MR job for incremental load.
 *
 * @param conf to persist serialized values into
 * @param tableDescriptor to read the properties from
 * @throws UnsupportedEncodingException
 *           on failure to URL-encode a column family name or compression name
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
    value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
@VisibleForTesting
static void configureCompression(Configuration conf, HTableDescriptor tableDescriptor)
    throws UnsupportedEncodingException {
  StringBuilder compressionConfigValue = new StringBuilder();
  if(tableDescriptor == null){
    // could happen with mock table instance
    return;
  }
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      compressionConfigValue.append('&');
    }
    compressionConfigValue.append(URLEncoder.encode(
      familyDescriptor.getNameAsString(), "UTF-8"));
    compressionConfigValue.append('=');
    compressionConfigValue.append(URLEncoder.encode(
      familyDescriptor.getCompression().getName(), "UTF-8"));
  }
  // Persist the '&'-joined, URL-encoded family=value pairs.
  conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 35, Source: HFileOutputFormat2.java
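For context, configureCompression and the similar configure* methods below all write the same wire format: URL-encoded family=value pairs joined by '&'. A minimal sketch of the reverse direction follows; decodeFamilyConfValue is a hypothetical helper name, not part of the HBase API shown in these examples.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class FamilyConfDecoder {
    // Hypothetical helper: parse a '&'-joined, URL-encoded family=value string
    // back into a map, i.e. the inverse of configureCompression above.
    static Map<String, String> decodeFamilyConfValue(Configuration conf, String confKey)
            throws UnsupportedEncodingException {
        Map<String, String> result = new HashMap<String, String>();
        for (String pair : conf.get(confKey, "").split("&")) {
            if (pair.isEmpty()) {
                continue;
            }
            String[] kv = pair.split("=", 2);
            result.put(URLDecoder.decode(kv[0], "UTF-8"),
                kv.length > 1 ? URLDecoder.decode(kv[1], "UTF-8") : "");
        }
        return result;
    }
}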

Example 3: configureBlockSize

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Serialize column family to block size map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           on failure to URL-encode a column family name or block size
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
    throws UnsupportedEncodingException {
  StringBuilder blockSizeConfigValue = new StringBuilder();
  if (tableDescriptor == null) {
    // could happen with mock table instance
    return;
  }
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      blockSizeConfigValue.append('&');
    }
    blockSizeConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
    blockSizeConfigValue.append('=');
    blockSizeConfigValue.append(URLEncoder.encode(
        String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
  }
  // Persist the '&'-joined, URL-encoded family=value pairs.
  conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source: HFileOutputFormat2.java

Example 4: configureBloomType

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Serialize column family to bloom type map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws UnsupportedEncodingException
 *           on failure to URL-encode a column family name or bloom type
 */
@VisibleForTesting
static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf)
    throws UnsupportedEncodingException {
  if (tableDescriptor == null) {
    // could happen with mock table instance
    return;
  }
  StringBuilder bloomTypeConfigValue = new StringBuilder();
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      bloomTypeConfigValue.append('&');
    }
    bloomTypeConfigValue.append(URLEncoder.encode(
      familyDescriptor.getNameAsString(), "UTF-8"));
    bloomTypeConfigValue.append('=');
    // Check for null before converting, falling back to the default bloom filter.
    String bloomType = familyDescriptor.getBloomFilterType() == null
        ? HColumnDescriptor.DEFAULT_BLOOMFILTER
        : familyDescriptor.getBloomFilterType().toString();
    bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
  }
  conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 35, Source: HFileOutputFormat2.java

Example 5: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * This is a helper function to compute HDFS block distribution on demand
 *
 * @param conf            configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionInfo      the region to compute the distribution for
 * @param tablePath       the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
    final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
    throws IOException {
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  FileSystem fs = tablePath.getFileSystem(conf);

  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
    if (storeFiles == null) continue;
    for (StoreFileInfo storeFileInfo : storeFiles) {
      try {
        hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
      } catch (IOException ioe) {
        LOG.warn("Error getting hdfs block distribution for " + storeFileInfo);
      }
    }
  }
  return hdfsBlocksDistribution;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source: HRegion.java

Example 6: createTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Creates a new table with indexes defined by IndexDescriptor.
 *
 * @param indexDesc index table descriptor for the table
 * @throws IOException
 * @throws IndexExistedException
 */
public void createTable(IndexTableDescriptor indexDesc)
    throws IOException, IndexExistedException {
  HTableDescriptor descriptor = new HTableDescriptor(indexDesc.getTableDescriptor());
  descriptor.remove(IndexConstants.INDEX_KEY);
  admin.createTable(descriptor, indexDesc.getSplitKeys());
  admin.disableTable(descriptor.getTableName());

  if (indexDesc.hasIndex()) {
    // corresponding cct
    if (indexDesc.getIndexSpecifications()[0].getIndexType() == IndexType.CCIndex) {
      System.out.println("winter new cct of main table: " + Bytes.toString(Bytes
          .add(indexDesc.getTableDescriptor().getTableName().getName(), IndexConstants.CCT_FIX)));
      HTableDescriptor cctDesc = new HTableDescriptor(TableName.valueOf(Bytes
          .add(indexDesc.getTableDescriptor().getTableName().getName(), IndexConstants.CCT_FIX)));
      for (HColumnDescriptor f : descriptor.getFamilies()) {
        cctDesc.addFamily(f);
      }
      admin.createTable(cctDesc, indexDesc.getSplitKeys());
    }
    this.addIndexes(indexDesc.getTableDescriptor().getTableName(),
        indexDesc.getIndexSpecifications());
  }
  enableTable(descriptor.getTableName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 32, Source: CCIndexAdmin.java

Example 7: testGetDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
@Test
public void testGetDescriptor() throws IOException {
    HTableDescriptor descriptor = table.getTableDescriptor();

    assertEquals(table.getName().getNameAsString(), descriptor.getNameAsString());

    Collection<HColumnDescriptor> columnDescriptors = descriptor.getFamilies();
    assertEquals(1, columnDescriptors.size());
    assertEquals(1, columnDescriptors.iterator().next().getMaxVersions());
    assertEquals(Integer.MAX_VALUE, columnDescriptors.iterator().next().getTimeToLive());
}
 
Developer ID: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 12, Source: TestTable.java

Example 8: configureDataBlockEncoding

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Serialize the column family to data block encoding map into the configuration.
 * Invoked while configuring the MR job for incremental load.
 *
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 * @throws UnsupportedEncodingException
 *           on failure to URL-encode a column family name or encoding name
 */
@VisibleForTesting
static void configureDataBlockEncoding(HTableDescriptor tableDescriptor,
    Configuration conf) throws UnsupportedEncodingException {
  if (tableDescriptor == null) {
    // could happen with mock table instance
    return;
  }
  StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      dataBlockEncodingConfigValue.append('&');
    }
    dataBlockEncodingConfigValue.append(
        URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
    dataBlockEncodingConfigValue.append('=');
    DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
    if (encoding == null) {
      encoding = DataBlockEncoding.NONE;
    }
    dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
        "UTF-8"));
  }
  conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
      dataBlockEncodingConfigValue.toString());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source: HFileOutputFormat2.java

Example 9: hasReferences

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Check whether region has Reference file
 *
 * @param htd table descriptor of the region
 * @return true if region has reference file
 * @throws IOException
 */
public boolean hasReferences(final HTableDescriptor htd) throws IOException {
  for (HColumnDescriptor family : htd.getFamilies()) {
    if (hasReferences(family.getNameAsString())) {
      return true;
    }
  }
  return false;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: HRegionFileSystem.java

Example 10: setTableRep

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Set the table's replication switch if it is not already in the requested state.
 * @param tableName name of the table
 * @param isRepEnabled whether replication is to be enabled or disabled
 * @throws IOException if a remote or network exception occurs
 */
private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
  Admin admin = null;
  try {
    admin = this.connection.getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
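    // XOR: proceed only when the current replication state differs from the
    // requested one, so a no-op change does not trigger a schema update.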
    if (isTableRepEnabled(htd) ^ isRepEnabled) {
      boolean isOnlineSchemaUpdateEnabled =
          this.connection.getConfiguration()
              .getBoolean("hbase.online.schema.update.enable", true);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.disableTable(tableName);
      }
      for (HColumnDescriptor hcd : htd.getFamilies()) {
        hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL
            : HConstants.REPLICATION_SCOPE_LOCAL);
      }
      admin.modifyTable(tableName, htd);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.enableTable(tableName);
      }
    }
  } finally {
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        LOG.warn("Failed to close admin connection.");
        LOG.debug("Details on failure to close admin connection.", e);
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: ReplicationAdmin.java

Example 11: isTableRepEnabled

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * @param htd table descriptor details for the table to check
 * @return true if table's replication switch is enabled
 */
private boolean isTableRepEnabled(HTableDescriptor htd) {
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    if (hcd.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
      return false;
    }
  }
  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: ReplicationAdmin.java

Example 12: getUnmodifyableFamilies

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
private static HColumnDescriptor[] getUnmodifyableFamilies(
    final HTableDescriptor desc) {
  HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()];
  int i = 0;
  for (HColumnDescriptor c: desc.getFamilies()) {
    f[i++] = c;
  }
  return f;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: UnmodifyableHTableDescriptor.java
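As an aside, the manual copy loop in this example could be written more compactly with Collection.toArray; a sketch of the equivalent one-liner:

private static HColumnDescriptor[] getUnmodifyableFamilies(final HTableDescriptor desc) {
  // Collection.toArray allocates and fills the array in one call.
  return desc.getFamilies().toArray(new HColumnDescriptor[0]);
}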

Example 13: checkDaughterInFs

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Checks if a daughter region -- either splitA or splitB -- still holds
 * references to parent.
 * @param parent Parent region
 * @param daughter Daughter region
 * @return A pair where the first boolean says whether or not the daughter
 * region directory exists in the filesystem and then the second boolean says
 * whether the daughter has references to the parent.
 * @throws IOException
 */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
throws IOException {
  if (daughter == null) {
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }

  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());

  Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());

  HRegionFileSystem regionFs = null;

  try {
    if (!FSUtils.isExists(fs, daughterRegionDir)) {
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }
  } catch (IOException ioe) {
    LOG.warn("Error trying to determine if daughter region exists, " +
             "assuming exists and has references", ioe);
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
  }

  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, daughter, true);
  } catch (IOException e) {
    LOG.warn("Error trying to determine referenced files from : " + daughter.getEncodedName()
        + ", to: " + parent.getEncodedName() + " assuming has references", e);
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
  }

  boolean references = false;
  HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
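  // Note the assignment inside the loop condition: scanning stops at the
  // first column family that still holds references to the parent region.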
  for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
    if ((references = regionFs.hasReferences(family.getNameAsString()))) {
      break;
    }
  }
  return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 53, Source: CatalogJanitor.java

Example 14: testNonLegacyWALKeysDoNotExplode

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
@Test
public void testNonLegacyWALKeysDoNotExplode() throws Exception {
  TableName tableName = TableName.valueOf(TEST_TABLE);
  final HTableDescriptor htd = createBasic3FamilyHTD(Bytes
      .toString(TEST_TABLE));
  final HRegionInfo hri = new HRegionInfo(tableName, null, null);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();

  fs.mkdirs(new Path(FSUtils.getTableDir(hbaseRootDir, tableName), hri.getEncodedName()));

  final Configuration newConf = HBaseConfiguration.create(this.conf);

  final WAL wal = wals.getWAL(UNSPECIFIED_REGION);
  final SampleRegionWALObserver newApi = getCoprocessor(wal, SampleRegionWALObserver.class);
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  final SampleRegionWALObserver oldApi = getCoprocessor(wal,
      SampleRegionWALObserver.Legacy.class);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("ensuring wal entries haven't happened before we start");
  assertFalse(newApi.isPreWALWriteCalled());
  assertFalse(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("writing to WAL with non-legacy keys.");
  final int countPerFamily = 5;
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        EnvironmentEdgeManager.getDelegate(), wal, htd, mvcc);
  }

  LOG.debug("Verify that only the non-legacy CP saw edits.");
  assertTrue(newApi.isPreWALWriteCalled());
  assertTrue(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  // wish we could test that the log message happened :/
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("reseting cp state.");
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("write a log edit that supports legacy cps.");
  final long now = EnvironmentEdgeManager.currentTime();
  final WALKey legacyKey = new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), now);
  final WALEdit edit = new WALEdit();
  final byte[] nonce = Bytes.toBytes("1772");
  edit.add(new KeyValue(TEST_ROW, TEST_FAMILY[0], nonce, now, nonce));
  final long txid = wal.append(htd, hri, legacyKey, edit, true);
  wal.sync(txid);

  LOG.debug("Make sure legacy cps can see supported edits after having been skipped.");
  assertTrue("non-legacy WALObserver didn't see pre-write.", newApi.isPreWALWriteCalled());
  assertTrue("non-legacy WALObserver didn't see post-write.", newApi.isPostWALWriteCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy pre-write.",
      newApi.isPreWALWriteDeprecatedCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy post-write.",
      newApi.isPostWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see pre-write.", oldApi.isPreWALWriteCalled());
  assertTrue("legacy WALObserver didn't see post-write.", oldApi.isPostWALWriteCalled());
  assertTrue("legacy WALObserver didn't see legacy pre-write.",
      oldApi.isPreWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see legacy post-write.",
      oldApi.isPostWALWriteDeprecatedCalled());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 75, Source: TestWALObserver.java

Example 15: testWALCoprocessorReplay

import org.apache.hadoop.hbase.HTableDescriptor; // import the class this method depends on
/**
 * Test WAL replay behavior with WALObserver.
 */
@Test
public void testWALCoprocessorReplay() throws Exception {
  // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
  // ultimately called by HRegion::initialize()
  TableName tableName = TableName.valueOf("testWALCoprocessorReplay");
  final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  // final HRegionInfo hri =
  // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
  // final HRegionInfo hri1 =
  // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
  final HRegionInfo hri = new HRegionInfo(tableName, null, null);

  final Path basedir =
      FSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  fs.mkdirs(new Path(basedir, hri.getEncodedName()));

  final Configuration newConf = HBaseConfiguration.create(this.conf);

  // WAL wal = new WAL(this.fs, this.dir, this.oldLogDir, this.conf);
  WAL wal = wals.getWAL(UNSPECIFIED_REGION);
  // Put p = creatPutWith2Families(TEST_ROW);
  WALEdit edit = new WALEdit();
  long now = EnvironmentEdgeManager.currentTime();
  // addFamilyMapToWALEdit(p.getFamilyMap(), edit);
  final int countPerFamily = 1000;
  // for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        EnvironmentEdgeManager.getDelegate(), wal, htd, mvcc);
  }
  wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc), edit, true);
  // sync to fs.
  wal.sync();

  User user = HBaseTestingUtility.getDifferentUser(newConf,
      ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction() {
    public Object run() throws Exception {
      Path p = runWALSplit(newConf);
      LOG.info("WALSplit path == " + p);
      FileSystem newFS = FileSystem.get(newConf);
      // Make a new wal for new region open.
      final WALFactory wals2 = new WALFactory(conf, null, currentTest.getMethodName()+"2");
      WAL wal2 = wals2.getWAL(UNSPECIFIED_REGION);
      HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir,
          hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
      long seqid2 = region.getOpenSeqNum();

      SampleRegionWALObserver cp2 =
        (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor(
            SampleRegionWALObserver.class.getName());
      // TODO: asserting here is problematic.
      assertNotNull(cp2);
      assertTrue(cp2.isPreWALRestoreCalled());
      assertTrue(cp2.isPostWALRestoreCalled());
      assertFalse(cp2.isPreWALRestoreDeprecatedCalled());
      assertFalse(cp2.isPostWALRestoreDeprecatedCalled());
      region.close();
      wals2.close();
      return null;
    }
  });
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 69, Source: TestWALObserver.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.getFamilies examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors. Consult each project's License before distributing or using the code, and do not reproduce this article without permission.