

Java MonitoredTask.markComplete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.monitoring.MonitoredTask.markComplete. If you have been wondering exactly what MonitoredTask.markComplete does, how to call it, or where to find real-world examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.monitoring.MonitoredTask.


The sections below present 11 code examples of the MonitoredTask.markComplete method, sorted by popularity by default.
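As a quick orientation before the examples, here is a minimal, self-contained sketch of the lifecycle that all eleven snippets follow: create a status object via TaskMonitor, report progress with setStatus, then finish with markComplete on success or abort on failure, releasing resources with cleanup. Only the calls TaskMonitor.get().createStatus, setStatus, markComplete, abort, and cleanup are taken from the examples below; the task body and status messages are invented for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class MarkCompleteSketch {

  public static void main(String[] args) {
    // Register the task with the global TaskMonitor so it becomes
    // visible in the server's monitored-task list.
    MonitoredTask status = TaskMonitor.get().createStatus("Demonstrating markComplete");
    try {
      // Report intermediate progress while the work runs.
      status.setStatus("Doing the work");
      doWork();
      // Record successful completion; the message becomes the final status.
      status.markComplete("Work finished successfully");
    } catch (IOException e) {
      // On failure, record an aborted state instead of completion.
      status.abort("Failed: " + e.getMessage());
    } finally {
      // Release monitoring resources, as the flushcache examples below do.
      status.cleanup();
    }
  }

  // Hypothetical task body, used only for illustration.
  private static void doWork() throws IOException {
    // ... real work would go here ...
  }
}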

Example 1: initializeWarmup

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private void initializeWarmup(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  // Initialize all the HStores
  status.setStatus("Warming up all the Stores");
  try {
    initializeStores(reporter, status);
  } finally {
    status.markComplete("Done warming up.");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: HRegion.java

Example 2: flushcache

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
/**
 * Flush the cache. When this method is called the cache will be flushed unless:
 * <ol>
 * <li>the cache is empty</li>
 * <li>the region is closed.</li>
 * <li>a flush is already in progress</li>
 * <li>writes are disabled</li>
 * </ol>
 * <p>
 * This method may block for some time, so it should not be called from a time-sensitive thread.
 * @return true if the region needs compaction
 * @throws IOException general io exceptions
 * @throws DroppedSnapshotException Thrown when replay of hlog is required because a Snapshot was
 *           not properly persisted.
 */
public boolean flushcache() throws IOException {
  // fail-fast instead of waiting on the lock
  if (this.closing.get()) {
    LOG.debug("Skipping flush on " + this + " because closing");
    return false;
  }
  MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this);
  status.setStatus("Acquiring readlock on region");
  // block waiting for the lock for flushing cache
  lock.readLock().lock();
  try {
    if (this.closed.get()) {
      LOG.debug("Skipping flush on " + this + " because closed");
      status.abort("Skipped: closed");
      return false;
    }
    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor pre-flush hooks");
      coprocessorHost.preFlush();
    }
    if (numPutsWithoutWAL.get() > 0) {
      numPutsWithoutWAL.set(0);
      dataInMemoryWithoutWAL.set(0);
    }
    synchronized (writestate) {
      if (!writestate.flushing && writestate.writesEnabled) {
        this.writestate.flushing = true;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("NOT flushing memstore for region " + this + ", flushing="
              + writestate.flushing + ", writesEnabled=" + writestate.writesEnabled);
        }
        status.abort("Not flushing since "
            + (writestate.flushing ? "already flushing" : "writes not enabled"));
        return false;
      }
    }
    try {
      boolean result = internalFlushcache(status);

      if (coprocessorHost != null) {
        status.setStatus("Running post-flush coprocessor hooks");
        coprocessorHost.postFlush();
      }

      status.markComplete("Flush successful");
      return result;
    } finally {
      synchronized (writestate) {
        writestate.flushing = false;
        this.writestate.flushRequested = false;
        writestate.notifyAll();
      }
    }
  } finally {
    lock.readLock().unlock();
    status.cleanup();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 75, Source: HRegion.java

Example 3: initializeRegionInternals

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private long initializeRegionInternals(final CancelableProgressable reporter,
                                       final MonitoredTask status) throws IOException, UnsupportedEncodingException {
    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor pre-open hook");
        coprocessorHost.preOpen();
    }

    // Write HRI to a file in case we need to recover hbase:meta
    status.setStatus("Writing region info on filesystem");
    fs.checkRegionInfoOnFilesystem();


    // Initialize all the HStores
    status.setStatus("Initializing all the Stores");
    long maxSeqId = initializeRegionStores(reporter, status);

    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
    this.writestate.flushRequested = false;
    this.writestate.compacting = 0;

    if (this.writestate.writesEnabled) {
        // Remove temporary data left over from old regions
        status.setStatus("Cleaning up temporary data from old regions");
        fs.cleanupTempDir();
    }

    if (this.writestate.writesEnabled) {
        status.setStatus("Cleaning up detritus from prior splits");
        // Get rid of any splits or merges that were lost in-progress.  Clean out
        // these directories here on open.  We may be opening a region that was
        // being split but we crashed in the middle of it all.
        fs.cleanupAnySplitDetritus();
        fs.cleanupMergesDir();
    }

    // Initialize split policy
    this.splitPolicy = RegionSplitPolicy.create(this, conf);

    this.lastFlushTime = EnvironmentEdgeManager.currentTime();
    // Use maximum of wal sequenceid or that which was found in stores
    // (particularly if no recovered edits, seqid will be -1).
    long nextSeqid = maxSeqId;

    // In distributedLogReplay mode, we don't know the last change sequence number because region
    // is opened before recovery completes. So we add a safety bumper to avoid new sequence number
    // overlaps used sequence numbers
    if (this.writestate.writesEnabled) {
        nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs
                .getRegionDir(), nextSeqid, (this.isRecovering ? (this.flushPerChanges + 10000000) : 1));
    } else {
        nextSeqid++;
    }

    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
            "; next sequenceid=" + nextSeqid);

    // A region can be reopened if failed a split; reset flags
    this.closing.set(false);
    this.closed.set(false);

    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor post-open hooks");
        coprocessorHost.postOpen();
    }

    status.markComplete("Region opened successfully");
    return nextSeqid;
}
 
Developer: grokcoder, Project: pbase, Lines: 69, Source: HRegion.java

Example 4: initializeRegionInternals

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private long initializeRegionInternals(final CancelableProgressable reporter,
    final MonitoredTask status) throws IOException, UnsupportedEncodingException {
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor pre-open hook");
    coprocessorHost.preOpen();
  }

  // Write HRI to a file in case we need to recover hbase:meta
  status.setStatus("Writing region info on filesystem");
  fs.checkRegionInfoOnFilesystem();

  // Remove temporary data left over from old regions
  status.setStatus("Cleaning up temporary data from old regions");
  fs.cleanupTempDir();

  // Initialize all the HStores
  status.setStatus("Initializing all the Stores");
  long maxSeqId = initializeRegionStores(reporter, status);

  status.setStatus("Cleaning up detritus from prior splits");
  // Get rid of any splits or merges that were lost in-progress.  Clean out
  // these directories here on open.  We may be opening a region that was
  // being split but we crashed in the middle of it all.
  fs.cleanupAnySplitDetritus();
  fs.cleanupMergesDir();

  this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
  this.writestate.flushRequested = false;
  this.writestate.compacting = 0;

  // Initialize split policy
  this.splitPolicy = RegionSplitPolicy.create(this, conf);

  this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
  // Use maximum of log sequenceid or that which was found in stores
  // (particularly if no recovered edits, seqid will be -1).
  long nextSeqid = maxSeqId + 1;
  if (this.isRecovering) {
    // In distributedLogReplay mode, we don't know the last change sequence number because region
    // is opened before recovery completes. So we add a safety bumper to avoid new sequence number
    // overlaps used sequence numbers
    nextSeqid += this.flushPerChanges + 10000000; // add another extra 10million
  }
  LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
    "; next sequenceid=" + nextSeqid);

  // A region can be reopened if failed a split; reset flags
  this.closing.set(false);
  this.closed.set(false);

  this.completeSequenceId = nextSeqid;
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor post-open hooks");
    coprocessorHost.postOpen();
  }

  status.markComplete("Region opened successfully");
  return nextSeqid;
}
 
Developer: tenggyut, Project: HIndex, Lines: 60, Source: HRegion.java

Example 5: flushcache

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
/**
 * Flush the cache.
 *
 * When this method is called the cache will be flushed unless:
 * <ol>
 *   <li>the cache is empty</li>
 *   <li>the region is closed.</li>
 *   <li>a flush is already in progress</li>
 *   <li>writes are disabled</li>
 * </ol>
 *
 * <p>This method may block for some time, so it should not be called from a
 * time-sensitive thread.
 *
 * @return true if the region needs compaction
 *
 * @throws IOException general io exceptions
 * @throws DroppedSnapshotException Thrown when replay of hlog is required
 * because a Snapshot was not properly persisted.
 */
public boolean flushcache() throws IOException {
  // fail-fast instead of waiting on the lock
  if (this.closing.get()) {
    LOG.debug("Skipping flush on " + this + " because closing");
    return false;
  }
  MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this);
  status.setStatus("Acquiring readlock on region");
  // block waiting for the lock for flushing cache
  lock.readLock().lock();
  try {
    if (this.closed.get()) {
      LOG.debug("Skipping flush on " + this + " because closed");
      status.abort("Skipped: closed");
      return false;
    }
    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor pre-flush hooks");
      coprocessorHost.preFlush();
    }
    if (numPutsWithoutWAL.get() > 0) {
      numPutsWithoutWAL.set(0);
      dataInMemoryWithoutWAL.set(0);
    }
    synchronized (writestate) {
      if (!writestate.flushing && writestate.writesEnabled) {
        this.writestate.flushing = true;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("NOT flushing memstore for region " + this
              + ", flushing=" + writestate.flushing + ", writesEnabled="
              + writestate.writesEnabled);
        }
        status.abort("Not flushing since "
            + (writestate.flushing ? "already flushing"
                : "writes not enabled"));
        return false;
      }
    }
    try {
      boolean result = internalFlushcache(status);

      if (coprocessorHost != null) {
        status.setStatus("Running post-flush coprocessor hooks");
        coprocessorHost.postFlush();
      }

      status.markComplete("Flush successful");
      return result;
    } finally {
      synchronized (writestate) {
        writestate.flushing = false;
        this.writestate.flushRequested = false;
        writestate.notifyAll();
      }
    }
  } finally {
    lock.readLock().unlock();
    status.cleanup();
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 82, Source: HRegion.java

Example 6: splitLogDistributed

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
/**
 * The caller will block until all the log files of the given region server
 * have been processed - successfully split or an error is encountered - by an
 * available worker region server. This method must only be called after the
 * region servers have been brought online.
 *
 * @param logDirs
 * @throws IOException
 *          if there was an error while splitting any log file
 * @return cumulative size of the logfiles split
 */
public long splitLogDistributed(final List<Path> logDirs) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus(
        "Doing distributed log split in " + logDirs);
  FileStatus[] logfiles = getFileList(logDirs);
  status.setStatus("Checking directory contents...");
  LOG.debug("Scheduling batch of logs to split");
  tot_mgr_log_split_batch_start.incrementAndGet();
  LOG.info("started splitting logs in " + logDirs);
  long t = EnvironmentEdgeManager.currentTimeMillis();
  long totalSize = 0;
  TaskBatch batch = new TaskBatch();
  for (FileStatus lf : logfiles) {
    // TODO If the log file is still being written to - which is most likely
    // the case for the last log file - then its length will show up here
    // as zero. The size of such a file can only be retrieved after
    // recover-lease is done. totalSize will be under in most cases and the
    // metrics that it drives will also be under-reported.
    totalSize += lf.getLen();
    if (!installTask(lf.getPath().toString(), batch)) {
      throw new IOException("duplicate log split scheduled for "
          + lf.getPath());
    }
  }
  waitTasks(batch, status);
  if (batch.done != batch.installed) {
    batch.isDead = true;
    tot_mgr_log_split_batch_err.incrementAndGet();
    LOG.warn("error while splitting logs in " + logDirs +
    " installed = " + batch.installed + " but only " + batch.done + " done");
    throw new IOException("error or interrupt while splitting logs in "
        + logDirs + " Task = " + batch);
  }
  for(Path logDir: logDirs){
    status.setStatus("Cleaning up log directory...");
    try {
      if (fs.exists(logDir) && !fs.delete(logDir, false)) {
        LOG.warn("Unable to delete log src dir. Ignoring. " + logDir);
      }
    } catch (IOException ioe) {
      FileStatus[] files = fs.listStatus(logDir);
      if (files != null && files.length > 0) {
        LOG.warn("returning success without actually splitting and " + 
            "deleting all the log files in path " + logDir);
      } else {
        LOG.warn("Unable to delete log src dir. Ignoring. " + logDir, ioe);
      }
    }
    tot_mgr_log_split_batch_success.incrementAndGet();
  }
  String msg = "finished splitting (more than or equal to) " + totalSize +
      " bytes in " + batch.installed + " log files in " + logDirs + " in " +
      (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms";
  status.markComplete(msg);
  LOG.info(msg);
  return totalSize;
}
 
Developer: lifeng5042, Project: RStore, Lines: 68, Source: SplitLogManager.java

Example 7: doClose

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private List<StoreFile> doClose(
    final boolean abort, MonitoredTask status)
throws IOException {
  if (isClosed()) {
    LOG.warn("Region " + this + " already closed");
    return null;
  }

  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor pre-close hooks");
    this.coprocessorHost.preClose(abort);
  }

  status.setStatus("Disabling compacts and flushes for region");
  boolean wasFlushing = false;
  synchronized (writestate) {
    // Disable compacting and flushing by background threads for this
    // region.
    writestate.writesEnabled = false;
    wasFlushing = writestate.flushing;
    LOG.debug("Closing " + this + ": disabling compactions & flushes");
    while (writestate.compacting > 0 || writestate.flushing) {
      LOG.debug("waiting for " + writestate.compacting + " compactions" +
          (writestate.flushing ? " & cache flush" : "") +
          " to complete for region " + this);
      try {
        writestate.wait();
      } catch (InterruptedException iex) {
        // continue
      }
    }
  }
  // If we were not just flushing, is it worth doing a preflush...one
  // that will clear out of the bulk of the memstore before we put up
  // the close flag?
  if (!abort && !wasFlushing && worthPreFlushing()) {
    status.setStatus("Pre-flushing region before close");
    LOG.info("Running close preflush of " + this.getRegionNameAsString());
    internalFlushcache(status);
  }

  this.closing.set(true);
  status.setStatus("Disabling writes for close");
  lock.writeLock().lock();
  try {
    if (this.isClosed()) {
      status.abort("Already got closed by another process");
      // SplitTransaction handles the null
      return null;
    }
    LOG.debug("Updates disabled for region " + this);
    // Don't flush the cache if we are aborting
    if (!abort) {
      internalFlushcache(status);
    }

    List<StoreFile> result = new ArrayList<StoreFile>();
    for (Store store : stores.values()) {
      result.addAll(store.close());
    }
    this.closed.set(true);

    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor post-close hooks");
      this.coprocessorHost.postClose(abort);
    }
    status.markComplete("Closed");
    LOG.info("Closed " + this);
    return result;
  } finally {
    lock.writeLock().unlock();
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 74, Source: HRegion.java

Example 8: flushcache

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
/**
 * Flush the cache.
 *
 * When this method is called the cache will be flushed unless:
 * <ol>
 *   <li>the cache is empty</li>
 *   <li>the region is closed.</li>
 *   <li>a flush is already in progress</li>
 *   <li>writes are disabled</li>
 * </ol>
 *
 * <p>This method may block for some time, so it should not be called from a
 * time-sensitive thread.
 *
 * @return true if cache was flushed
 *
 * @throws IOException general io exceptions
 * @throws DroppedSnapshotException Thrown when replay of hlog is required
 * because a Snapshot was not properly persisted.
 */
public boolean flushcache() throws IOException {
  // fail-fast instead of waiting on the lock
  if (this.closing.get()) {
    LOG.debug("Skipping flush on " + this + " because closing");
    return false;
  }
  MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this);
  status.setStatus("Acquiring readlock on region");
  lock.readLock().lock();
  try {
    if (this.closed.get()) {
      LOG.debug("Skipping flush on " + this + " because closed");
      status.abort("Skipped: closed");
      return false;
    }
    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor pre-flush hooks");
      coprocessorHost.preFlush();
    }
    try {
      synchronized (writestate) {
        if (!writestate.flushing && writestate.writesEnabled) {
          this.writestate.flushing = true;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("NOT flushing memstore for region " + this +
                ", flushing=" +
                writestate.flushing + ", writesEnabled=" +
                writestate.writesEnabled);
          }
          status.abort("Not flushing since " +
              (writestate.flushing ? "already flushing" : "writes not enabled"));
          return false;
        }
      }
      boolean result = internalFlushcache(status);

      if (coprocessorHost != null) {
        status.setStatus("Running post-flush coprocessor hooks");
        coprocessorHost.postFlush();
      }

      status.markComplete("Flush successful");
      return result;
    } finally {
      synchronized (writestate) {
        writestate.flushing = false;
        this.writestate.flushRequested = false;
        writestate.notifyAll();
      }
    }
  } finally {
    lock.readLock().unlock();
    status.cleanup();
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 77, Source: HRegion.java

Example 9: initializeRegionInternals

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private long initializeRegionInternals(final CancelableProgressable reporter,
    final MonitoredTask status) throws IOException, UnsupportedEncodingException {
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor pre-open hook");
    coprocessorHost.preOpen();
  }

  // Write HRI to a file in case we need to recover hbase:meta
  status.setStatus("Writing region info on filesystem");
  fs.checkRegionInfoOnFilesystem();

  // Remove temporary data left over from old regions
  status.setStatus("Cleaning up temporary data from old regions");
  fs.cleanupTempDir();

  // Initialize all the HStores
  status.setStatus("Initializing all the Stores");
  long maxSeqId = initializeRegionStores(reporter, status);

  status.setStatus("Cleaning up detritus from prior splits");
  // Get rid of any splits or merges that were lost in-progress.  Clean out
  // these directories here on open.  We may be opening a region that was
  // being split but we crashed in the middle of it all.
  fs.cleanupAnySplitDetritus();
  fs.cleanupMergesDir();

  this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
  this.writestate.flushRequested = false;
  this.writestate.compacting = 0;

  // Initialize split policy
  this.splitPolicy = RegionSplitPolicy.create(this, conf);

  this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
  // Use maximum of log sequenceid or that which was found in stores
  // (particularly if no recovered edits, seqid will be -1).
  long nextSeqid = maxSeqId + 1;
  if (this.isRecovering) {
    // In distributedLogReplay mode, we don't know the last change sequence number because region
    // is opened before recovery completes. So we add a safety bumper to avoid new sequence number
    // overlaps used sequence numbers
    nextSeqid += this.flushPerChanges + 10000000; // add another extra 10million
  }
  LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
    "; next sequenceid=" + nextSeqid);

  // A region can be reopened if failed a split; reset flags
  this.closing.set(false);
  this.closed.set(false);

  this.lastFlushSeqId = nextSeqid;
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor post-open hooks");
    coprocessorHost.postOpen();
  }

  status.markComplete("Region opened successfully");
  return nextSeqid;
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 60, Source: HRegion.java

Example 10: initializeRegionInternals

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
private long initializeRegionInternals(final CancelableProgressable reporter,
    final MonitoredTask status) throws IOException, UnsupportedEncodingException {
  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor pre-open hook");
    coprocessorHost.preOpen();
  }

  // Write HRI to a file in case we need to recover hbase:meta
  status.setStatus("Writing region info on filesystem");
  fs.checkRegionInfoOnFilesystem();

  // Remove temporary data left over from old regions
  status.setStatus("Cleaning up temporary data from old regions");
  fs.cleanupTempDir();

  // Initialize all the HStores
  status.setStatus("Initializing all the Stores");
  long maxSeqId = initializeRegionStores(reporter, status);

  status.setStatus("Cleaning up detritus from prior splits");
  // Get rid of any splits or merges that were lost in-progress.  Clean out
  // these directories here on open.  We may be opening a region that was
  // being split but we crashed in the middle of it all.
  fs.cleanupAnySplitDetritus();
  fs.cleanupMergesDir();

  this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
  this.writestate.flushRequested = false;
  this.writestate.compacting = 0;

  // Initialize split policy
  this.splitPolicy = RegionSplitPolicy.create(this, conf);

  this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
  // Use maximum of log sequenceid or that which was found in stores
  // (particularly if no recovered edits, seqid will be -1).
  long nextSeqid = maxSeqId + 1;
  LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
    "; next sequenceid=" + nextSeqid);

  // A region can be reopened if failed a split; reset flags
  this.closing.set(false);
  this.closed.set(false);

  if (coprocessorHost != null) {
    status.setStatus("Running coprocessor post-open hooks");
    coprocessorHost.postOpen();
  }

  status.markComplete("Region opened successfully");
  return nextSeqid;
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 53, Source: HRegion.java

Example 11: flushcache

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the package/class this method depends on
/**
 * Flush the cache.
 *
 * When this method is called the cache will be flushed unless:
 * <ol>
 *   <li>the cache is empty</li>
 *   <li>the region is closed.</li>
 *   <li>a flush is already in progress</li>
 *   <li>writes are disabled</li>
 * </ol>
 *
 * <p>This method may block for some time, so it should not be called from a
 * time-sensitive thread.
 *
 * @return true if the region needs compacting
 *
 * @throws IOException general io exceptions
 * @throws DroppedSnapshotException Thrown when replay of hlog is required
 * because a Snapshot was not properly persisted.
 */
public boolean flushcache() throws IOException {
  // fail-fast instead of waiting on the lock
  if (this.closing.get()) {
    LOG.debug("Skipping flush on " + this + " because closing");
    return false;
  }
  MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this);
  status.setStatus("Acquiring readlock on region");
  // block waiting for the lock for flushing cache
  lock.readLock().lock();
  try {
    if (this.closed.get()) {
      LOG.debug("Skipping flush on " + this + " because closed");
      status.abort("Skipped: closed");
      return false;
    }
    if (coprocessorHost != null) {
      status.setStatus("Running coprocessor pre-flush hooks");
      coprocessorHost.preFlush();
    }
    if (numMutationsWithoutWAL.get() > 0) {
      numMutationsWithoutWAL.set(0);
      dataInMemoryWithoutWAL.set(0);
    }
    synchronized (writestate) {
      if (!writestate.flushing && writestate.writesEnabled) {
        this.writestate.flushing = true;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("NOT flushing memstore for region " + this
              + ", flushing=" + writestate.flushing + ", writesEnabled="
              + writestate.writesEnabled);
        }
        status.abort("Not flushing since "
            + (writestate.flushing ? "already flushing"
                : "writes not enabled"));
        return false;
      }
    }
    try {
      boolean result = internalFlushcache(status);

      if (coprocessorHost != null) {
        status.setStatus("Running post-flush coprocessor hooks");
        coprocessorHost.postFlush();
      }

      status.markComplete("Flush successful");
      return result;
    } finally {
      synchronized (writestate) {
        writestate.flushing = false;
        this.writestate.flushRequested = false;
        writestate.notifyAll();
      }
    }
  } finally {
    lock.readLock().unlock();
    status.cleanup();
  }
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 82, Source: HRegion.java


Note: the org.apache.hadoop.hbase.monitoring.MonitoredTask.markComplete examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.