

Java ErrorReporter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter. If you are wondering what the ErrorReporter class does, how to use it, or what real usage looks like, the curated examples below should help.


The ErrorReporter class belongs to the org.apache.hadoop.hbase.util.HBaseFsck package. The 11 code examples below show typical uses of the class, sorted by popularity by default.
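
Before looking at the individual examples, the following minimal sketch shows the usage pattern they all share: a checker component is handed an HBaseFsck.ErrorReporter and calls reportError(...) for structured problems or print(...) for plain output. The ExampleChecker class and its check(...) method are illustrative assumptions; only the ErrorReporter methods and the ERROR_CODE constant are taken from the examples on this page.

import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;

// Hypothetical checker showing the common ErrorReporter usage pattern.
public class ExampleChecker {
  private final ErrorReporter errorReporter;

  public ExampleChecker(ErrorReporter errorReporter) {
    this.errorReporter = errorReporter;
  }

  public void check(boolean problemFound) {
    if (problemFound) {
      // Report a structured, coded error (EXPIRED_TABLE_LOCK also appears in Example 4).
      errorReporter.reportError(
          HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, "problem detected");
    } else {
      // Plain informational output.
      errorReporter.print("check passed");
    }
  }
}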

Example 1: getTableStoreFilePathMap

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
/**
 * Runs through the HBase rootdir and creates a reverse lookup map for
 * table StoreFile names to the full Path.
 * <br>
 * Example...<br>
 * Key = 3944417774205889744  <br>
 * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
 *
 * @param fs  The file system to use.
 * @param hbaseRootDir  The root directory to scan.
 * @param errors ErrorReporter instance or null
 * @return Map keyed by StoreFile name with a value of the full Path.
 * @throws IOException When scanning the directory fails.
 */
public static Map<String, Path> getTableStoreFilePathMap(
  final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
throws IOException {
  Map<String, Path> map = new HashMap<String, Path>();

  // if this method looks similar to 'getTableFragmentation' that is because
  // it was borrowed from it.

  // only include the directory paths to tables
  for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
    getTableStoreFilePathMap(map, fs, hbaseRootDir,
        FSUtils.getTableName(tableDir), errors);
  }
  return map;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: FSUtils.java
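
A hedged call-site sketch for the method above: the fs, rootDir and errors arguments are assumed to be prepared by the surrounding tool (for example HBaseFsck itself), and printStoreFileMap is an illustrative name, not part of HBase.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;

// Hypothetical call site; fs, rootDir and errors are supplied by the caller.
public static void printStoreFileMap(FileSystem fs, Path rootDir, ErrorReporter errors)
    throws IOException {
  Map<String, Path> storeFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir, errors);
  for (Map.Entry<String, Path> entry : storeFiles.entrySet()) {
    System.out.println(entry.getKey() + " -> " + entry.getValue());
  }
}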

Example 2: checkUnDeletedQueues

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public void checkUnDeletedQueues() throws ReplicationException {
  undeletedQueueIds = getUnDeletedQueues();
  undeletedQueueIds.forEach((replicator, queueIds) -> {
    queueIds.forEach(queueId -> {
      ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
      String msg = "Undeleted replication queue for removed peer found: " +
        String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(),
          replicator, queueId);
      errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE,
        msg);
    });
  });
  undeletedHFileRefsPeerIds = getUndeletedHFileRefsPeers();
  undeletedHFileRefsPeerIds.stream()
      .map(
        peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found")
      .forEach(msg -> errorReporter
          .reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg));
}
 
Developer ID: apache, Project: hbase, Lines: 20, Source: ReplicationChecker.java

Example 3: TableLockChecker

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public TableLockChecker(ZooKeeperWatcher zkWatcher, ErrorReporter errorReporter) {
  this.zkWatcher = zkWatcher;
  this.errorReporter = errorReporter;
  expireTimeout = zkWatcher.getConfiguration().getLong(
      TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
      TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: TableLockChecker.java

Example 4: checkTableLocks

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public void checkTableLocks() throws IOException {
  TableLockManager tableLockManager
    = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
  final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout;

  MetadataHandler handler = new MetadataHandler() {
    @Override
    public void handleMetadata(byte[] ownerMetadata) {
      ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
      String msg = "Table lock acquire attempt found:";
      if (data != null) {
         msg = msg +
            String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " +
            "purpose=%s, isShared=%s, createTime=%s]",
            data.getTableName().getNamespace().toStringUtf8(),
            data.getTableName().getQualifier().toStringUtf8(),
            ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
            data.getPurpose(), data.getIsShared(), data.getCreateTime());
      }

      if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
        errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
      } else {
        errorReporter.print(msg);
      }
    }
  };

  tableLockManager.visitAllLocks(handler);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TableLockChecker.java
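
Taken together, Examples 3 and 4 suggest the following driver sketch: construct the TableLockChecker with a ZooKeeperWatcher and an ErrorReporter, then run the lock check. The package names for TableLockChecker and ZooKeeperWatcher are assumed from the HBase 1.x source layout, and checkLocks is an illustrative helper name.

import java.io.IOException;

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical driver; zkWatcher and errors are assumed to be created elsewhere.
public static void checkLocks(ZooKeeperWatcher zkWatcher, ErrorReporter errors)
    throws IOException {
  TableLockChecker checker = new TableLockChecker(zkWatcher, errors);
  // Expired locks are reported as ERROR_CODE.EXPIRED_TABLE_LOCK; others are only printed.
  checker.checkTableLocks();
}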

Example 5: report

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
/**
 * Print a human readable summary of hfile quarantining operations.
 * @param out ErrorReporter to which the summary is printed
 */
public void report(ErrorReporter out) {
  out.print("Checked " + hfilesChecked.get() + " hfile for corruption");
  out.print("  HFiles corrupted:                  " + corrupted.size());
  if (inQuarantineMode) {
    out.print("    HFiles successfully quarantined: " + quarantined.size());
    for (Path sq : quarantined) {
      out.print("      " + sq);
    }
    out.print("    HFiles failed quarantine:        " + failures.size());
    for (Path fq : failures) {
      out.print("      " + fq);
    }
  }
  out.print("    HFiles moved while checking:     " + missing.size());
  for (Path mq : missing) {
    out.print("      " + mq);
  }

  String initialState = (corrupted.size() == 0) ? "OK" : "CORRUPTED";
  String fixedState = (corrupted.size() == quarantined.size()) ? "OK"
      : "CORRUPTED";

  if (inQuarantineMode) {
    out.print("Summary: " + initialState + " => " + fixedState);
  } else {
    out.print("Summary: " + initialState);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: HFileCorruptionChecker.java

Example 6: checkTableLocks

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public void checkTableLocks() throws IOException {
  TableLockManager tableLockManager
    = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
  final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout;

  MetadataHandler handler = new MetadataHandler() {
    @Override
    public void handleMetadata(byte[] ownerMetadata) {
      ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
      String msg = "Table lock acquire attempt found:";
      if (data != null) {
         msg = msg +
            String.format("[tableName=%s, lockOwner=%s, threadId=%s, " +
            "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()),
            ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
            data.getPurpose(), data.getIsShared(), data.getCreateTime());
      }

      if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
        errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
      } else {
        errorReporter.print(msg);
      }
    }
  };

  tableLockManager.visitAllLocks(handler);
}
 
Developer ID: grokcoder, Project: pbase, Lines: 29, Source: TableLockChecker.java

Example 7: checkTableLocks

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public void checkTableLocks() throws IOException {
  TableLockManager tableLockManager
    = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
  final long expireDate = EnvironmentEdgeManager.currentTimeMillis() - expireTimeout;

  MetadataHandler handler = new MetadataHandler() {
    @Override
    public void handleMetadata(byte[] ownerMetadata) {
      ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
      String msg = "Table lock acquire attempt found:";
      if (data != null) {
         msg = msg +
            String.format("[tableName=%s, lockOwner=%s, threadId=%s, " +
            "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()),
            ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
            data.getPurpose(), data.getIsShared(), data.getCreateTime());
      }

      if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
        errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
      } else {
        errorReporter.print(msg);
      }
    }
  };

  tableLockManager.visitAllLocks(handler);
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 29, Source: TableLockChecker.java

Example 8: checkTableLocks

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public void checkTableLocks() throws IOException {
  TableLockManager tableLockManager
    = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
  final long expireDate = EnvironmentEdgeManager.currentTimeMillis() - expireTimeout;

  MetadataHandler handler = new MetadataHandler() {
    @Override
    public void handleMetadata(byte[] ownerMetadata) {
      ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
      String msg = "Table lock acquire attempt found:";
      if (data != null) {
         msg = msg +
            String.format("[tableName=%s, lockOwner=%s, threadId=%s, " +
            "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()),
            ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
            data.getPurpose(), data.getIsShared(), data.getCreateTime());
      }

      if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
        errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
      } else {
        errorReporter.print(msg);
      }
    }
  };

  tableLockManager.visitAllLocks(handler);
}
 
Developer ID: cloud-software-foundation, Project: c5, Lines: 29, Source: TableLockChecker.java

Example 9: getTableStoreFilePathMap

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
/**
 * Runs through the HBase rootdir and creates a reverse lookup map for
 * table StoreFile names to the full Path.
 * <br>
 * Example...<br>
 * Key = 3944417774205889744  <br>
 * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
 *
 * @param fs  The file system to use.
 * @param hbaseRootDir  The root directory to scan.
 * @param sfFilter optional path filter to apply to store files
 * @param executor optional executor service to parallelize this operation
 * @param errors ErrorReporter instance or null
 * @return Map keyed by StoreFile name with a value of the full Path.
 * @throws IOException When scanning the directory fails.
 * @throws InterruptedException
 */
public static Map<String, Path> getTableStoreFilePathMap(
  final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter,
  ExecutorService executor, ErrorReporter errors)
throws IOException, InterruptedException {
  ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<>(1024, 0.75f, 32);

  // if this method looks similar to 'getTableFragmentation' that is because
  // it was borrowed from it.

  // only include the directory paths to tables
  for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
    getTableStoreFilePathMap(map, fs, hbaseRootDir,
        FSUtils.getTableName(tableDir), sfFilter, executor, errors);
  }
  return map;
}
 
Developer ID: apache, Project: hbase, Lines: 34, Source: FSUtils.java
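
A hedged sketch of calling the parallel overload above: the thread count and the null path filter are illustrative choices, and fs, rootDir and errors are assumed to be provided by the caller.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;

// Hypothetical call site for the parallel overload; the pool size is an arbitrary example.
public static Map<String, Path> mapStoreFiles(FileSystem fs, Path rootDir, ErrorReporter errors)
    throws IOException, InterruptedException {
  ExecutorService executor = Executors.newFixedThreadPool(16);
  try {
    // A null PathFilter means no extra store-file filtering is applied.
    return FSUtils.getTableStoreFilePathMap(fs, rootDir, null, executor, errors);
  } finally {
    executor.shutdown();
  }
}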

Example 10: ReplicationChecker

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public ReplicationChecker(Configuration conf, ZKWatcher zkw, ErrorReporter errorReporter) {
  this.peerStorage = ReplicationStorageFactory.getReplicationPeerStorage(zkw, conf);
  this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, conf);
  this.errorReporter = errorReporter;
}
 
Developer ID: apache, Project: hbase, Lines: 6, Source: ReplicationChecker.java

Example 11: hasUnDeletedQueues

import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; // import the required package/class
public boolean hasUnDeletedQueues() {
  return errorReporter.getErrorList()
      .contains(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE);
}
 
Developer ID: apache, Project: hbase, Lines: 5, Source: ReplicationChecker.java
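
Examples 2, 10 and 11 combine naturally into the following sketch: build the ReplicationChecker, scan for undeleted queues, then ask whether any were found. The package names for ReplicationChecker and ZKWatcher are assumed from the HBase source layout, and checkReplication is an illustrative helper name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

// Hypothetical driver combining Examples 2, 10 and 11; conf, zkw and errors
// are assumed to be provided by the surrounding fsck run.
public static boolean checkReplication(Configuration conf, ZKWatcher zkw, ErrorReporter errors)
    throws ReplicationException {
  ReplicationChecker checker = new ReplicationChecker(conf, zkw, errors);
  checker.checkUnDeletedQueues();      // reports UNDELETED_REPLICATION_QUEUE errors
  return checker.hasUnDeletedQueues(); // true if any such error was recorded
}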


Note: The org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce this article without permission.