

Java SplitLogManager Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.master.SplitLogManager. If you have been wondering what exactly SplitLogManager is for, how it is used, or where to find usage examples, the hand-picked code samples below may help.


The SplitLogManager class belongs to the org.apache.hadoop.hbase.master package. Nine code examples of the class are presented below, sorted by popularity by default.

Example 1: split

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
public static List<Path> split(Path rootDir, Path logDir, Path oldLogDir,
    FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
  final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
      Collections.singletonList(logDir), null);
  List<Path> splits = new ArrayList<Path>();
  if (logfiles != null && logfiles.length > 0) {
    for (FileStatus logfile: logfiles) {
      WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, null,
          RecoveryMode.LOG_SPLITTING);
      if (s.splitLogFile(logfile, null)) {
        finishSplitLogFile(rootDir, oldLogDir, logfile.getPath(), conf);
        if (s.outputSink.splits != null) {
          splits.addAll(s.outputSink.splits);
        }
      }
    }
  }
  if (!fs.delete(logDir, true)) {
    throw new IOException("Unable to delete src dir: " + logDir);
  }
  return splits;
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: WALSplitter.java
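
Below is a minimal, hypothetical invocation sketch for the static split method above. The three-argument WALFactory constructor matches HBase 1.x-era APIs and may differ in other versions; all paths and the factory id are placeholders, not values from the original project:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class WalSplitDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder directory layout; substitute the real cluster paths.
    Path rootDir = new Path("/hbase");
    Path logDir = new Path(rootDir, "WALs/rs1.example.com,16020,1");
    Path oldLogDir = new Path(rootDir, "oldWALs");
    // HBase 1.x-style constructor: WALFactory(conf, listeners, factoryId).
    WALFactory factory = new WALFactory(conf, null, "wal-split-demo");
    // Splits every WAL under logDir into per-region recovered-edits files.
    List<Path> splits = WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, factory);
    System.out.println("Recovered-edits files: " + splits);
  }
}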

Example 2: split

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
@VisibleForTesting
public static List<Path> split(Path rootDir, Path logDir, Path oldLogDir,
    FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
  final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
      Collections.singletonList(logDir), null);
  List<Path> splits = new ArrayList<>();
  if (ArrayUtils.isNotEmpty(logfiles)) {
    for (FileStatus logfile: logfiles) {
      WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, null);
      if (s.splitLogFile(logfile, null)) {
        finishSplitLogFile(rootDir, oldLogDir, logfile.getPath(), conf);
        if (s.outputSink.splits != null) {
          splits.addAll(s.outputSink.splits);
        }
      }
    }
  }
  if (!fs.delete(logDir, true)) {
    throw new IOException("Unable to delete src dir: " + logDir);
  }
  return splits;
}
 
Author: apache, Project: hbase, Lines: 23, Source: WALSplitter.java

Example 3: SplitLogWorker

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
public SplitLogWorker(ZooKeeperWatcher watcher, Configuration conf, RegionServerServices server,
    TaskExecutor splitTaskExecutor) {
  super(watcher);
  this.server = server;
  this.serverName = server.getServerName();
  this.splitTaskExecutor = splitTaskExecutor;
  report_period = conf.getInt("hbase.splitlog.report.period",
    conf.getInt("hbase.splitlog.manager.timeout", SplitLogManager.DEFAULT_TIMEOUT) / 3);
  this.conf = conf;
  this.executorService = this.server.getExecutorService();
  this.maxConcurrentTasks =
      conf.getInt("hbase.regionserver.wal.max.splitters", DEFAULT_MAX_SPLITTERS);
}
 
Author: tenggyut, Project: HIndex, Lines: 14, Source: SplitLogWorker.java
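
The constructor derives report_period through a layered fallback: an explicit hbase.splitlog.report.period wins; otherwise one third of hbase.splitlog.manager.timeout (itself defaulting to SplitLogManager.DEFAULT_TIMEOUT) is used. The self-contained sketch below reproduces that chain; the 120000 ms constant is an assumed stand-in for SplitLogManager.DEFAULT_TIMEOUT, whose value varies across HBase versions:

import org.apache.hadoop.conf.Configuration;

public class ReportPeriodDemo {
  // Assumed stand-in for SplitLogManager.DEFAULT_TIMEOUT; the real value
  // depends on the HBase version in use.
  private static final int DEFAULT_TIMEOUT = 120000;

  static int reportPeriod(Configuration conf) {
    // Same nested lookup as in the SplitLogWorker constructor above.
    return conf.getInt("hbase.splitlog.report.period",
        conf.getInt("hbase.splitlog.manager.timeout", DEFAULT_TIMEOUT) / 3);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    System.out.println(reportPeriod(conf));            // 40000: DEFAULT_TIMEOUT / 3
    conf.setInt("hbase.splitlog.manager.timeout", 90000);
    System.out.println(reportPeriod(conf));            // 30000: derived from the timeout
    conf.setInt("hbase.splitlog.report.period", 5000);
    System.out.println(reportPeriod(conf));            // 5000: explicit setting wins
  }
}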

Example 4: updateRecoveringRegionLastFlushedSequenceId

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
/**
 * A helper function to store the last flushed sequence Id with the previous failed RS for a
 * recovering region. The Id is used to skip wal edits which are flushed. Since the flushed
 * sequence id is only valid for each RS, we associate the Id with corresponding failed RS.
 * @throws KeeperException
 * @throws IOException
 */
private void updateRecoveringRegionLastFlushedSequenceId(HRegion r) throws KeeperException,
    IOException {
  if (!r.isRecovering()) {
    // return immediately for non-recovering regions
    return;
  }

  HRegionInfo region = r.getRegionInfo();
  ZooKeeperWatcher zkw = getZooKeeper();
  String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName());
  Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay();
  long minSeqIdForLogReplay = -1;
  for (Long storeSeqIdForReplay : maxSeqIdInStores.values()) {
    if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
      minSeqIdForLogReplay = storeSeqIdForReplay;
    }
  }
  long lastRecordedFlushedSequenceId = -1;
  String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
    region.getEncodedName());
  // recovering-region level
  byte[] data = ZKUtil.getData(zkw, nodePath);
  if (data != null) {
    lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
  }
  if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) {
    ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
  }
  if (previousRSName != null) {
    // one level deeper for the failed RS
    nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
    ZKUtil.setData(zkw, nodePath,
      ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
    LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
        + previousRSName);
  } else {
    LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 47, Source: HRegionServer.java
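
The value written to ZooKeeper is the minimum of the per-store maximum flushed sequence ids: only edits at or below that floor are guaranteed flushed in every store, so replay must start above it. A standalone sketch of that reduction follows; the map contents are invented for illustration, and the byte[]-keyed TreeMap needs Bytes.BYTES_COMPARATOR because Java arrays lack value-based equals:

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplayFloorDemo {
  public static void main(String[] args) {
    // Invented per-column-family max flushed sequence ids.
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    maxSeqIdInStores.put(Bytes.toBytes("cf1"), 412L);
    maxSeqIdInStores.put(Bytes.toBytes("cf2"), 397L);

    // Same reduction as the method above: only the smallest per-store id is
    // guaranteed flushed everywhere, so it becomes the replay floor.
    long minSeqIdForLogReplay = -1;
    for (Long storeSeqId : maxSeqIdInStores.values()) {
      if (minSeqIdForLogReplay == -1 || storeSeqId < minSeqIdForLogReplay) {
        minSeqIdForLogReplay = storeSeqId;
      }
    }
    System.out.println(minSeqIdForLogReplay); // 397
  }
}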

Example 5: LogReplayOutputSink

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
public LogReplayOutputSink(int numWriters) {
  super(numWriters);
  this.waitRegionOnlineTimeOut = conf.getInt("hbase.splitlog.manager.timeout",
    SplitLogManager.DEFAULT_TIMEOUT);
  this.logRecoveredEditsOutputSink = new LogRecoveredEditsOutputSink(numWriters);
  this.logRecoveredEditsOutputSink.setReporter(reporter);
}
 
Author: tenggyut, Project: HIndex, Lines: 8, Source: HLogSplitter.java

Example 6: locateRegionAndRefreshLastFlushedSequenceId

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
/**
 * Locate destination region based on table name & row. This function also makes sure the
 * destination region is online for replay.
 * @throws IOException
 */
private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn,
    TableName table, byte[] row, String originalEncodedRegionName) throws IOException {
  // fetch location from cache
  HRegionLocation loc = onlineRegions.get(originalEncodedRegionName);
  if(loc != null) return loc;
  // fetch location from hbase:meta directly without using cache to avoid hit old dead server
  loc = hconn.getRegionLocation(table, row, true);
  if (loc == null) {
    throw new IOException("Can't locate location for row:" + Bytes.toString(row)
        + " of table:" + table);
  }
  // check if current row moves to a different region due to region merge/split
  if (!originalEncodedRegionName.equalsIgnoreCase(loc.getRegionInfo().getEncodedName())) {
    // originalEncodedRegionName should have already flushed
    lastFlushedSequenceIds.put(originalEncodedRegionName, Long.MAX_VALUE);
    HRegionLocation tmpLoc = onlineRegions.get(loc.getRegionInfo().getEncodedName());
    if (tmpLoc != null) return tmpLoc;
  }

  Long lastFlushedSequenceId = -1L;
  AtomicBoolean isRecovering = new AtomicBoolean(true);
  loc = waitUntilRegionOnline(loc, row, this.waitRegionOnlineTimeOut, isRecovering);
  if (!isRecovering.get()) {
    // region isn't in recovering at all because WAL file may contain a region that has
    // been moved to somewhere before hosting RS fails
    lastFlushedSequenceIds.put(loc.getRegionInfo().getEncodedName(), Long.MAX_VALUE);
    LOG.info("logReplay skip region: " + loc.getRegionInfo().getEncodedName()
        + " because it's not in recovering.");
  } else {
    Long cachedLastFlushedSequenceId =
        lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName());

    // retrieve last flushed sequence Id from ZK. Because region postOpenDeployTasks will
    // update the value for the region
    RegionStoreSequenceIds ids =
        SplitLogManager.getRegionFlushedSequenceId(watcher, failedServerName, loc
            .getRegionInfo().getEncodedName());
    if (ids != null) {
      lastFlushedSequenceId = ids.getLastFlushedSequenceId();
      Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
      List<StoreSequenceId> maxSeqIdInStores = ids.getStoreSequenceIdList();
      for (StoreSequenceId id : maxSeqIdInStores) {
        storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
      }
      regionMaxSeqIdInStores.put(loc.getRegionInfo().getEncodedName(), storeIds);
    }

    if (cachedLastFlushedSequenceId == null
        || lastFlushedSequenceId > cachedLastFlushedSequenceId) {
      lastFlushedSequenceIds.put(loc.getRegionInfo().getEncodedName(), lastFlushedSequenceId);
    }
  }

  onlineRegions.put(loc.getRegionInfo().getEncodedName(), loc);
  return loc;
}
 
Author: tenggyut, Project: HIndex, Lines: 62, Source: HLogSplitter.java
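
Note how the cached last flushed sequence id is only ever raised, never lowered, so a stale ZooKeeper read cannot shrink the set of skippable edits; regions that moved due to merge or split are simply pinned to Long.MAX_VALUE. A stripped-down sketch of that monotonic cache update follows; the class and field names are invented, not taken from HLogSplitter:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FlushedSeqIdCacheDemo {
  // Invented stand-in for the splitter's lastFlushedSequenceIds cache.
  private final Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<>();

  // Mirror of the guard in the example: replace the cached value only when
  // the incoming id is strictly larger, keeping the cache monotonic.
  void refresh(String encodedRegionName, long idFromZk) {
    Long cached = lastFlushedSequenceIds.get(encodedRegionName);
    if (cached == null || idFromZk > cached) {
      lastFlushedSequenceIds.put(encodedRegionName, idFromZk);
    }
  }

  public static void main(String[] args) {
    FlushedSeqIdCacheDemo cache = new FlushedSeqIdCacheDemo();
    cache.refresh("region-a", 100L);
    cache.refresh("region-a", 80L); // ignored: lower than the cached 100
    System.out.println(cache.lastFlushedSequenceIds); // {region-a=100}
  }
}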

Example 7: updateRecoveringRegionLastFlushedSequenceId

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
/**
 * A helper function to store the last flushed sequence Id with the previous failed RS for a
 * recovering region. The Id is used to skip wal edits which are flushed. Since the flushed
 * sequence id is only valid for each RS, we associate the Id with corresponding failed RS.
 * @throws KeeperException
 * @throws IOException
 */
private void updateRecoveringRegionLastFlushedSequenceId(HRegion r) throws KeeperException,
    IOException {
  if (!r.isRecovering()) {
    // return immediately for non-recovering regions
    return;
  }

  HRegionInfo region = r.getRegionInfo();
  ZooKeeperWatcher zkw = getZooKeeper();
  String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName());
  Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay();
  long minSeqIdForLogReplay = -1;
  for (Long storeSeqIdForReplay : maxSeqIdInStores.values()) {
    if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
      minSeqIdForLogReplay = storeSeqIdForReplay;
    }
  }
  long lastRecordedFlushedSequenceId = -1;
  String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
    region.getEncodedName());
  // recovering-region level
  byte[] data;
  try {
    data = ZKUtil.getData(zkw, nodePath);
  } catch (InterruptedException e) {
    throw new InterruptedIOException();
  }
  if (data != null) {
    lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
  }
  if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) {
    ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
  }
  if (previousRSName != null) {
    // one level deeper for the failed RS
    nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
    ZKUtil.setData(zkw, nodePath,
      ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
    LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
        + previousRSName);
  } else {
    LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());
  }
}
 
Author: shenli-uiuc, Project: PyroDB, Lines: 52, Source: HRegionServer.java
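
This variant differs from example 4 by wrapping the ZooKeeper read and converting InterruptedException into InterruptedIOException, so interruption can propagate through an IOException-only method signature. A minimal sketch of the conversion pattern follows; the blocking call is simulated, and unlike the example it also restores the interrupt flag and chains the cause, which is generally considered better practice:

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptConversionDemo {
  // Simulated blocking call standing in for ZKUtil.getData(zkw, nodePath).
  static byte[] blockingRead() throws InterruptedException {
    Thread.sleep(10);
    return new byte[0];
  }

  // IOException-only signature, like updateRecoveringRegionLastFlushedSequenceId.
  static byte[] readOrInterrupt() throws IOException {
    try {
      return blockingRead();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();  // keep the interrupt visible to callers
      InterruptedIOException iioe = new InterruptedIOException();
      iioe.initCause(e);                   // preserve the original cause
      throw iioe;
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(readOrInterrupt().length); // 0
  }
}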

Example 8: updateRecoveringRegionLastFlushedSequenceId

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
/**
 * A helper function to store the last flushed sequence Id with the previous failed RS for a
 * recovering region. The Id is used to skip wal edits which are flushed. Since the flushed
 * sequence id is only valid for each RS, we associate the Id with corresponding failed RS.
 * @throws KeeperException
 * @throws IOException
 */
private void updateRecoveringRegionLastFlushedSequenceId(HRegion r) throws KeeperException,
    IOException {
  if (!r.isRecovering()) {
    // return immediately for non-recovering regions
    return;
  }

  HRegionInfo region = r.getRegionInfo();
  ZooKeeperWatcher zkw = getZooKeeper();
  String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName());
  Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay();
  long minSeqIdForLogReplay = -1;
  for (byte[] columnFamily : maxSeqIdInStores.keySet()) {
    Long storeSeqIdForReplay = maxSeqIdInStores.get(columnFamily);
    if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
      minSeqIdForLogReplay = storeSeqIdForReplay;
    }
  }
  long lastRecordedFlushedSequenceId = -1;
  String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
    region.getEncodedName());
  // recovering-region level
  byte[] data = ZKUtil.getData(zkw, nodePath);
  if (data != null) {
    lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
  }
  if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) {
    ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
  }
  if (previousRSName != null) {
    // one level deeper for the failed RS
    nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
    ZKUtil.setData(zkw, nodePath,
      ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
    LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
        + previousRSName);
  } else {
    LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());
  }
}
 
Author: cloud-software-foundation, Project: c5, Lines: 48, Source: HRegionServer.java

Example 9: DistributedLogSplittingHelper

import org.apache.hadoop.hbase.master.SplitLogManager; // import the required package/class
public DistributedLogSplittingHelper(CancelableProgressable reporter) {
  this.splitReporter = reporter;
  report_period = conf.getInt("hbase.splitlog.report.period",
      conf.getInt("hbase.splitlog.manager.timeout",
          SplitLogManager.DEFAULT_TIMEOUT) / 2);
}
 
Author: daidong, Project: DominoHBase, Lines: 7, Source: HLogSplitter.java


Note: The org.apache.hadoop.hbase.master.SplitLogManager class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or reusing the code, and do not republish this article without permission.