

Java MetaReader Class Code Examples

This article collects and organizes typical usage examples of the Java class org.apache.hadoop.hbase.catalog.MetaReader. If you are wondering what the MetaReader class is for, how to use it, or what real-world usage looks like, the curated class examples below should help.


The MetaReader class belongs to the org.apache.hadoop.hbase.catalog package. Fifteen code examples of the MetaReader class are shown below, sorted by popularity by default.
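Before the individual examples, here is a minimal, self-contained sketch of the most common MetaReader pattern: resolving a region to its current location through a CatalogTracker. The wrapper class and the way the tracker is obtained are assumptions for illustration, not code from the projects below.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class MetaReaderUsageSketch {
  /** Looks a region up in the catalog and prints where it is deployed. */
  static void printRegionLocation(CatalogTracker catalogTracker,
      byte[] regionName) throws IOException {
    // MetaReader.getRegion returns null when the region is not in the catalog.
    Pair<HRegionInfo, ServerName> pair =
        MetaReader.getRegion(catalogTracker, regionName);
    if (pair == null) {
      System.out.println("Unknown region: " + Bytes.toStringBinary(regionName));
      return;
    }
    System.out.println(pair.getFirst().getRegionNameAsString()
        + " is deployed on " + pair.getSecond());
  }
}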

Example 1: fixupDaughter

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/**
 * Check that an individual daughter region is present in .META.; fix it up
 * if it is not.
 * @param result The contents of the parent row in .META.
 * @param qualifier Which daughter to check for.
 * @return 1 if the daughter was missing and has been fixed up; otherwise 0
 * @throws IOException
 */
static int fixupDaughter(final Result result, final byte [] qualifier,
    final AssignmentManager assignmentManager,
    final CatalogTracker catalogTracker)
throws IOException {
  HRegionInfo daughter =
    MetaReader.parseHRegionInfoFromCatalogResult(result, qualifier);
  if (daughter == null) return 0;
  if (isDaughterMissing(catalogTracker, daughter)) {
    LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
    MetaEditor.addDaughter(catalogTracker, daughter, null);

    // TODO: Log WARN if the regiondir does not exist in the fs. If it's not
    // there, something is wonky about the split -- things will keep going,
    // but there could be missing references to the parent region.

    // And assign it.
    assignmentManager.assign(daughter, true);
    return 1;
  } else {
    LOG.debug("Daughter " + daughter.getRegionNameAsString() + " present");
  }
  return 0;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 31, Source: ServerShutdownHandler.java
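For context, a hedged sketch of how fixupDaughter is plausibly driven for both daughters of a split parent. In 0.94 the daughters are recorded under the HConstants.SPLITA_QUALIFIER and SPLITB_QUALIFIER columns of the parent's .META. row; the caller below is illustrative, not code from the project above.

// Sketch only: check both daughters of a split parent row.
static int fixupDaughters(final Result result,
    final AssignmentManager assignmentManager,
    final CatalogTracker catalogTracker) throws IOException {
  int fixed = 0;
  fixed += fixupDaughter(result, HConstants.SPLITA_QUALIFIER,
      assignmentManager, catalogTracker);
  fixed += fixupDaughter(result, HConstants.SPLITB_QUALIFIER,
      assignmentManager, catalogTracker);
  return fixed; // number of daughters that had to be fixed up
}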

Example 2: assign

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
@Override
public void assign(final byte [] regionName) throws IOException {
  checkInitialized();
  Pair<HRegionInfo, ServerName> pair =
    MetaReader.getRegion(this.catalogTracker, regionName);
  if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
  if (cpHost != null) {
    if (cpHost.preAssign(pair.getFirst())) {
      return;
    }
  }
  assignRegion(pair.getFirst());
  if (cpHost != null) {
    cpHost.postAssign(pair.getFirst());
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: HMaster.java
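On the client side this master RPC is normally reached through HBaseAdmin; a minimal sketch, assuming the default client Configuration:

// Sketch: ask the master to (re)assign a region by name via HBaseAdmin.
static void reassignRegion(HRegionInfo hri) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
  try {
    admin.assign(hri.getRegionName());
  } finally {
    admin.close();
  }
}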

Example 3: unassign

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
@Override
public void unassign(final byte [] regionName, final boolean force)
throws IOException {
  checkInitialized();
  Pair<HRegionInfo, ServerName> pair =
    MetaReader.getRegion(this.catalogTracker, regionName);
  if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
  HRegionInfo hri = pair.getFirst();
  if (cpHost != null) {
    if (cpHost.preUnassign(hri, force)) {
      return;
    }
  }
  if (force) {
    this.assignmentManager.regionOffline(hri);
    assignRegion(hri);
  } else {
    this.assignmentManager.unassign(hri, force);
  }
  if (cpHost != null) {
    cpHost.postUnassign(hri, force);
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 24, Source: HMaster.java

Example 4: blockUntilRegionIsInMeta

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
public static void blockUntilRegionIsInMeta(HTable metaTable, long timeout, HRegionInfo hri)
    throws IOException, InterruptedException {
  log("blocking until region is in META: " + hri.getRegionNameAsString());
  long start = System.currentTimeMillis();
  while (System.currentTimeMillis() - start < timeout) {
    Result result = getRegionRow(metaTable, hri.getRegionName());
    if (result != null) {
      HRegionInfo info = MetaReader.parseCatalogResult(result).getFirst();
      if (info != null && !info.isOffline()) {
        log("found region in META: " + hri.getRegionNameAsString());
        break;
      }
    }
    Threads.sleep(10);
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: TestEndToEndSplitTransaction.java
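The getRegionRow helper is not part of this excerpt; a minimal sketch of what it plausibly does is a point Get (org.apache.hadoop.hbase.client.Get) against the meta table for the region's row key:

// Assumed helper: fetch the catalog row for a region with a point Get.
private static Result getRegionRow(HTable metaTable, byte[] regionName)
    throws IOException {
  return metaTable.get(new Get(regionName));
}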

Example 5: postStartMaster

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/********************************* Master related hooks **********************************/

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
    // Need to create the new system table for labels here
    MasterServices master = ctx.getEnvironment().getMasterServices();
    if (!MetaReader.tableExists(master.getCatalogTracker(), LABELS_TABLE_NAME)) {
      HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
      HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
      labelsColumn.setBloomFilterType(BloomType.NONE);
      labelsColumn.setBlockCacheEnabled(false); // We cache all the labels ourselves, so there is
                                                 // no need for the normal table block cache.
      labelsTable.addFamily(labelsColumn);
      // Keep the "labels" table to a single region, always. We do not expect
      // too many labels in the system.
      labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
          DisabledRegionSplitPolicy.class.getName());
      labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
          Bytes.toBytes(true));
      master.createTable(labelsTable, null);
    }
  }
 
Author: tenggyut, Project: HIndex, Lines: 23, Source: VisibilityController.java

Example 6: initTableReduceJob

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job configuration to adjust.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReduceJob(String table,
  Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
  boolean addDependencyJars) throws IOException {
  job.setOutputFormat(TableOutputFormat.class);
  job.setReducerClass(reducer);
  job.set(TableOutputFormat.OUTPUT_TABLE, table);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Put.class);
  job.setStrings("io.serializations", job.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
 
Author: tenggyut, Project: HIndex, Lines: 38, Source: TableMapReduceUtil.java
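A hypothetical driver showing how initTableReduceJob is wired into a JobConf; MyTableReducer and output_table are made-up names, and the job is submitted with the old-style mapred API to match the class above.

// Sketch: configure and submit a TableReduce job ("MyTableReducer" and
// "output_table" are hypothetical).
public static void main(String[] args) throws IOException {
  JobConf job = new JobConf(HBaseConfiguration.create());
  job.setJobName("example-table-reduce");
  TableMapReduceUtil.initTableReduceJob("output_table", MyTableReducer.class,
      job, HRegionPartitioner.class, true);
  JobClient.runJob(job);
}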

Example 7: getRegionInfo

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/**
 * Get the HRegionInfo from the cache or, failing that, from the hbase:meta table.
 * @param regionName the name of the region to look up
 * @return HRegionInfo for the region
 */
protected HRegionInfo getRegionInfo(final byte [] regionName) {
  String encodedName = HRegionInfo.encodeRegionName(regionName);
  RegionState regionState = regionStates.get(encodedName);
  if (regionState != null) {
    return regionState.getRegion();
  }

  try {
    Pair<HRegionInfo, ServerName> p =
      MetaReader.getRegion(server.getCatalogTracker(), regionName);
    HRegionInfo hri = p == null ? null : p.getFirst();
    if (hri != null) {
      createRegionState(hri);
    }
    return hri;
  } catch (IOException e) {
    server.abort("Aborting because an error occurred while reading "
      + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
    return null;
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 27, Source: RegionStates.java

Example 8: cleanMergeQualifier

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/**
 * Checks whether the specified region has merge qualifiers and, if so, tries
 * to clean them up.
 * @param region the region to check
 * @return true if the specified region doesn't have merge qualifier now
 * @throws IOException
 */
public boolean cleanMergeQualifier(final HRegionInfo region)
    throws IOException {
  // Get merge regions if it is a merged region and already has merge
  // qualifier
  Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaReader
      .getRegionsFromMergeQualifier(this.services.getCatalogTracker(),
          region.getRegionName());
  if (mergeRegions == null
      || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
    // It doesn't have merge qualifier, no need to clean
    return true;
  }
  // It shouldn't happen, we must insert/delete these two qualifiers together
  if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
    LOG.error("Merged region " + region.getRegionNameAsString()
        + " has only one merge qualifier in META.");
    return false;
  }
  return cleanMergeRegion(region, mergeRegions.getFirst(),
      mergeRegions.getSecond());
}
 
Author: tenggyut, Project: HIndex, Lines: 29, Source: CatalogJanitor.java

Example 9: getReopenStatus

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
/**
 * Used by the client to check whether all regions have picked up the schema
 * update.
 *
 * @param tableName the table whose regions are being reopened
 * @return Pair indicating the status of the alter command
 * @throws IOException
 */
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
    throws IOException {
  List <HRegionInfo> hris =
    MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
  Integer pending = 0;
  for (HRegionInfo hri : hris) {
    String name = hri.getEncodedName();
    // no lock concurrent access ok: sequential consistency respected.
    if (regionsToReopen.containsKey(name)
        || regionStates.isRegionInTransition(name)) {
      pending++;
    }
  }
  return new Pair<Integer, Integer>(pending, hris.size());
}
 
Author: tenggyut, Project: HIndex, Lines: 23, Source: AssignmentManager.java
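A hedged sketch of the client-side polling loop this method is built for; the AssignmentManager reference and the one-second interval are assumptions:

// Sketch: block until no regions are still pending reopen after an alter.
static void waitForReopen(AssignmentManager am, TableName tableName)
    throws IOException, InterruptedException {
  Pair<Integer, Integer> status = am.getReopenStatus(tableName);
  while (status.getFirst() > 0) { // first = pending, second = total regions
    Thread.sleep(1000);
    status = am.getReopenStatus(tableName);
  }
}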

Example 10: waitAndVerifyRegionNum

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
private void waitAndVerifyRegionNum(HMaster master, TableName tablename,
    int expectedRegionNum) throws Exception {
  List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
  List<HRegionInfo> tableRegionsInMaster;
  long timeout = System.currentTimeMillis() + waitTime;
  while (System.currentTimeMillis() < timeout) {
    tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
        master.getCatalogTracker(), tablename);
    tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
        .getRegionsOfTable(tablename);
    if (tableRegionsInMeta.size() == expectedRegionNum
        && tableRegionsInMaster.size() == expectedRegionNum) {
      break;
    }
    Thread.sleep(250);
  }

  tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
      master.getCatalogTracker(), tablename);
  LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
  assertEquals(expectedRegionNum, tableRegionsInMeta.size());
}
 
Author: tenggyut, Project: HIndex, Lines: 23, Source: TestRegionMergeTransactionOnCluster.java

Example 11: disableAndDeleteTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
private void disableAndDeleteTable(MasterServices master, TableName tableName) throws IOException {
  LOG.error(tableName + " already exists.  Disabling and deleting table " + tableName + '.');
  boolean disabled = master.getAssignmentManager().getZKTable().isDisabledTable(tableName);
  if (!disabled) {
    LOG.info("Disabling table " + tableName + '.');
    new DisableTableHandler(master, tableName, master.getCatalogTracker(),
        master.getAssignmentManager(), master.getTableLockManager(), false).prepare().process();
    if (!master.getAssignmentManager().getZKTable().isDisabledTable(tableName)) {
      throw new DoNotRetryIOException("Table " + tableName + " not disabled.");
    }
  }
  LOG.info("Disabled table " + tableName + '.');
  LOG.info("Deleting table " + tableName + '.');
  new DeleteTableHandler(tableName, master, master).prepare().process();
  if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) {
    throw new DoNotRetryIOException("Table " + tableName + " not deleted.");
  }
  LOG.info("Deleted table " + tableName + '.');
}
 
Author: tenggyut, Project: HIndex, Lines: 20, Source: IndexMasterObserver.java

Example 12: getStartKeysAndLocations

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
public static List<Pair<byte[], ServerName>> getStartKeysAndLocations(HMaster master,
    String tableName) throws IOException, InterruptedException {

  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
      MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(),
        TableName.valueOf(tableName));
  List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
      new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
  Pair<byte[], ServerName> startKeyAndLocation = null;
  for (Pair<HRegionInfo, ServerName> regionAndLocation : tableRegionsAndLocations) {
    startKeyAndLocation =
        new Pair<byte[], ServerName>(regionAndLocation.getFirst().getStartKey(),
            regionAndLocation.getSecond());
    startKeyAndLocationPairs.add(startKeyAndLocation);
  }
  return startKeyAndLocationPairs;

}
 
Author: tenggyut, Project: HIndex, Lines: 19, Source: TestUtils.java

Example 13: getStartKeysAndLocations

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
private List<Pair<byte[], ServerName>> getStartKeysAndLocations(HMaster master, String tableName)
    throws IOException, InterruptedException {

  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
      MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(),
        TableName.valueOf(tableName));
  List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
      new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
  Pair<byte[], ServerName> startKeyAndLocation = null;
  for (Pair<HRegionInfo, ServerName> regionAndLocation : tableRegionsAndLocations) {
    startKeyAndLocation =
        new Pair<byte[], ServerName>(regionAndLocation.getFirst().getStartKey(),
            regionAndLocation.getSecond());
    startKeyAndLocationPairs.add(startKeyAndLocation);
  }
  return startKeyAndLocationPairs;

}
 
Author: tenggyut, Project: HIndex, Lines: 19, Source: TestSecIndexLoadBalancer.java

Example 14: testIndexTableCreationAlongWithNormalTablesAfterMasterRestart

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
@Test(timeout = 180000)
public void testIndexTableCreationAlongWithNormalTablesAfterMasterRestart() throws Exception {
  TableName tableName =
      TableName.valueOf("testIndexTableCreationAlongWithNormalTablesAfterMasterRestart");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  admin.createTable(htd);
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  cluster.abortMaster(0);
  cluster.waitOnMaster(0);
  HMaster master = cluster.startMaster().getMaster();
  cluster.waitForActiveAndReadyMaster();

  boolean tableExist =
      MetaReader.tableExists(master.getCatalogTracker(),
        TableName.valueOf(IndexUtils.getIndexTableName(tableName)));
  assertFalse("Index table should not be created after master startup.", tableExist);
}
 
Author: tenggyut, Project: HIndex, Lines: 18, Source: TestIndexMasterObserver.java

Example 15: unassign

import org.apache.hadoop.hbase.catalog.MetaReader; // import the required package/class
@Override
public void unassign(final byte [] regionName, final boolean force)
throws IOException {
  Pair<HRegionInfo, ServerName> pair =
    MetaReader.getRegion(this.catalogTracker, regionName);
  if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
  HRegionInfo hri = pair.getFirst();
  if (cpHost != null) {
    if (cpHost.preUnassign(hri, force)) {
      return;
    }
  }
  if (force) {
    this.assignmentManager.clearRegionFromTransition(hri);
    assignRegion(hri);
  } else {
    this.assignmentManager.unassign(hri, force);
  }
  if (cpHost != null) {
    cpHost.postUnassign(hri, force);
  }
}
 
Author: lifeng5042, Project: RStore, Lines: 23, Source: HMaster.java


Note: The org.apache.hadoop.hbase.catalog.MetaReader class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's License; do not reproduce without permission.