

Java TableIntegrityErrorHandler Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler. If you are wondering what TableIntegrityErrorHandler is for or how to use it, the selected class examples below should help.


TableIntegrityErrorHandler belongs to the org.apache.hadoop.hbase.util.hbck package. Twelve code examples of the class are shown below, sorted by popularity by default.
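Before the examples, here is a minimal sketch of what implementing the interface itself can look like. It assumes the abstract TableIntegrityErrorHandlerImpl base class from the same org.apache.hadoop.hbase.util.hbck package, a handleHoleInRegionChain(byte[], byte[]) callback, and no-op defaults for the other callbacks; the class name HoleReportingHandler is hypothetical, and exact method signatures vary across HBase versions, so treat this as an illustration rather than a drop-in implementation.

import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;

/**
 * Illustrative handler that only reports holes in a table's region chain.
 * Assumption: TableIntegrityErrorHandlerImpl supplies no-op defaults for the
 * remaining callbacks; signatures may differ between HBase versions.
 */
public class HoleReportingHandler extends TableIntegrityErrorHandlerImpl {
  @Override
  public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey)
      throws IOException {
    // Only report the hole; a real fixer (see HDFSIntegrityFixer in the
    // examples below) would plug the gap in [holeStartKey, holeStopKey).
    System.out.println("Hole in region chain between "
        + Bytes.toStringBinary(holeStartKey) + " and "
        + Bytes.toStringBinary(holeStopKey));
  }
}

A handler along these lines can then be passed to TableInfo.checkRegionChain(...), in the same way IntegrityFixSuggester and HDFSIntegrityFixer are used in the examples that follow.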

Example 1: checkHdfsIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
        fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HBaseFsck.java

Example 2: checkHdfsIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
private SortedMap<String, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
        fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 19, Source: HBaseFsck.java

Example 3: checkHdfsIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
    boolean fixOverlaps) throws IOException {
  LOG.info("Checking HBase region split map from HDFS data...");
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler;
    if (fixHoles || fixOverlaps) {
      handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
        fixHoles, fixOverlaps);
    } else {
      handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    }
    if (!tInfo.checkRegionChain(handler)) {
      // should dump info as well.
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: grokcoder, Project: pbase, Lines: 19, Source: HBaseFsck.java

Example 4: suggestFixes

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Suggest fixes for each table
 */
private void suggestFixes(
    SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: HBaseFsck.java

Example 5: suggestFixes

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Suggest fixes for each table
 */
private void suggestFixes(SortedMap<String, TableInfo> tablesInfo) throws IOException {
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 10, Source: HBaseFsck.java

Example 6: suggestFixes

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Suggest fixes for each table
 */
private void suggestFixes(
    SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    tInfo.checkRegionChain(handler);
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 11, Source: HBaseFsck.java

Example 7: checkIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName,TableInfo> ();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: fengchen8086, Project: ditb, Lines: 70, Source: HBaseFsck.java

Example 8: WorkItemOverlapMerge

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
WorkItemOverlapMerge(Collection<HbckInfo> overlapgroup, TableIntegrityErrorHandler handler) {
  this.handler = handler;
  this.overlapgroup = overlapgroup;
}
 
Developer: fengchen8086, Project: ditb, Lines: 5, Source: HBaseFsck.java
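For context, a work item like this is typically submitted to an executor so each overlap group can be processed in parallel, with call() simply forwarding the group to the handler. The call() body below is a hedged sketch inferred from the constructor above, assuming the interface exposes a handleOverlapGroup(Collection&lt;HbckInfo&gt;) callback; it is not quoted from the listed project.

// Hedged sketch; nested inside HBaseFsck in the real source, so HbckInfo
// and TableIntegrityErrorHandler are already in scope there.
static class WorkItemOverlapMerge implements java.util.concurrent.Callable<Void> {
  private final TableIntegrityErrorHandler handler;
  private final Collection<HbckInfo> overlapgroup;

  WorkItemOverlapMerge(Collection<HbckInfo> overlapgroup, TableIntegrityErrorHandler handler) {
    this.handler = handler;
    this.overlapgroup = overlapgroup;
  }

  @Override
  public Void call() throws Exception {
    // Delegate the whole overlap group to the integrity handler, e.g. an
    // HDFSIntegrityFixer that merges or sidelines the overlapping regions.
    handler.handleOverlapGroup(overlapgroup);
    return null;
  }
}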

Example 9: checkIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<String, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<String,TableInfo> ();
  List<HbckInfo> noHDFSRegionInfos = new ArrayList<HbckInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      noHDFSRegionInfos.add(hbi);
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    String tableName = hbi.metaEntry.getTableNameAsString();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 69, Source: HBaseFsck.java

Example 10: checkIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName,TableInfo> ();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: grokcoder, Project: pbase, Lines: 69, Source: HBaseFsck.java

Example 11: checkIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName,TableInfo> ();
  List<HbckInfo> noHDFSRegionInfos = new ArrayList<HbckInfo>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      noHDFSRegionInfos.add(hbi);
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: tenggyut, Project: HIndex, Lines: 71, Source: HBaseFsck.java

Example 12: checkIntegrity

import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; // import the required package/class
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<>();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.isEmpty()) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  loadTableInfosForTablesWithNoRegion();

  logParallelMerge();
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
 
Developer: apache, Project: hbase, Lines: 70, Source: HBaseFsck.java


Note: The org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Please do not republish without permission.