当前位置: 首页>>代码示例>>Java>>正文


Java MonitoredTask.setStatus方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.monitoring.MonitoredTask.setStatus方法的典型用法代码示例。如果您正苦于以下问题:Java MonitoredTask.setStatus方法的具体用法?Java MonitoredTask.setStatus怎么用?Java MonitoredTask.setStatus使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.monitoring.MonitoredTask的用法示例。


在下文中一共展示了MonitoredTask.setStatus方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Closes this HRegion: flushes the cache (unless {@code abort} is set), shuts down each
 * HStore, and stops servicing further calls. This can take a while, so do not call it
 * from a time-sensitive thread.
 *
 * @param abort true if the server is aborting (only during testing)
 * @return map of all storage files (HStoreFile objects) used by this region's component
 *         HStores; can be null if we are not to close at this time or are already closed
 * @throws IOException              e
 * @throws DroppedSnapshotException thrown when replay of wal is required because a
 *                                  Snapshot was not properly persisted; the region is put
 *                                  in closing mode, and the caller MUST abort after this
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Serialize closers: only one thread at a time may run the close sequence,
  // so concurrent close attempts queue up on closeLock below.
  final String reason = abort ? " due to abort" : "";
  final MonitoredTask closeStatus =
      TaskMonitor.get().createStatus("Closing region " + this + reason);

  closeStatus.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, closeStatus);
    }
  } finally {
    // Always detach the monitored task, even if doClose threw.
    closeStatus.cleanup();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:30,代码来源:HRegion.java

示例2: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Moves a successfully written tmp flush file into its final store location and
 * reopens it as a StoreFile, folding its sizes into the store's accounting.
 *
 * @param path            tmp file the store was flushed to
 * @param logCacheFlushId sequence id of this cache flush (logged only)
 * @param status          task monitor updated as the commit progresses
 * @return the committed, reader-opened store file
 * @throws IOException on commit or reopen failure
 */
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; promote the tmp file into the store directory.
  final Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path, this, true);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  final StoreFile flushedFile = createStoreFileAndReader(committedPath);

  // Account for the new file's on-disk and uncompressed sizes.
  final StoreFile.Reader reader = flushedFile.getReader();
  this.storeSize += reader.length();
  this.totalUncompressedBytes += reader.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + flushedFile + ", entries=" + reader.getEntries() + ", sequenceid="
        + logCacheFlushId + ", filesize="
        + TraditionalBinaryPrefix.long2String(reader.length(), "", 1));
  }
  return flushedFile;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:HStore.java

示例3: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Closes this HRegion: flushes the cache (unless {@code abort} is set), shuts down each
 * HStore, and stops servicing further calls. This can take a while, so do not call it
 * from a time-sensitive thread.
 *
 * @param abort true if the server is aborting (only during testing)
 * @return list of all storage files (HStoreFile objects) used by this region's component
 *         HStores; can be null if we are not to close at this time or are already closed
 * @throws IOException e
 */
public List<StoreFile> close(final boolean abort) throws IOException {
  // Only one close may run at a time; concurrent callers block on closeLock below.
  final String reason = abort ? " due to abort" : "";
  final MonitoredTask closeStatus =
      TaskMonitor.get().createStatus("Closing region " + this + reason);

  closeStatus.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, closeStatus);
    }
  } finally {
    // Release the monitored task whether or not doClose succeeded.
    closeStatus.cleanup();
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:26,代码来源:HRegion.java

示例4: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Moves a successfully written tmp flush file into its final store location and
 * reopens it as a StoreFile, folding its sizes into the store's accounting.
 *
 * @param path                     tmp file the store was flushed to
 * @param logCacheFlushId          sequence id of this cache flush (logged only)
 * @param snapshotTimeRangeTracker unused here — NOTE(review): kept for signature
 *                                 compatibility; confirm against callers
 * @param flushedSize              unused here — NOTE(review): not updated by this method
 * @param status                   task monitor updated as the commit progresses
 * @return the committed, reader-opened store file
 * @throws IOException on commit or reopen failure
 */
private StoreFile commitFile(final Path path,
    final long logCacheFlushId,
    TimeRangeTracker snapshotTimeRangeTracker,
    AtomicLong flushedSize,
    MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; promote the tmp file into the store directory.
  final Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  final StoreFile flushedFile = createStoreFileAndReader(committedPath);

  // Account for the new file's on-disk and uncompressed sizes.
  final StoreFile.Reader reader = flushedFile.getReader();
  this.storeSize += reader.length();
  this.totalUncompressedBytes += reader.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + flushedFile + ", entries=" + reader.getEntries() +
      ", sequenceid=" + logCacheFlushId +
      ", filesize=" + StringUtils.humanReadableInt(reader.length()));
  }
  return flushedFile;
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:24,代码来源:HStore.java

示例5: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Closes this HRegion: flushes the cache (unless {@code abort} is set), shuts down each
 * HStore, and stops servicing further calls.
 * <p/>
 * This can take a while, so do not call it from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return map of all storage files (HStoreFile objects) used by this region's component
 *         HStores; can be null if we are not to close at this time or are already closed
 * @throws IOException e
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
    // Serialize closers: only one thread may run the close sequence at a time.
    final String reason = abort ? " due to abort" : "";
    final MonitoredTask closeStatus =
            TaskMonitor.get().createStatus("Closing region " + this + reason);

    closeStatus.setStatus("Waiting for close lock");
    try {
        synchronized (closeLock) {
            return doClose(abort, closeStatus);
        }
    } finally {
        // Always detach the monitored task, even if doClose threw.
        closeStatus.cleanup();
    }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:30,代码来源:HRegion.java

示例6: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Moves a successfully written tmp flush file into its final store location and
 * reopens it as a StoreFile, folding its sizes into the store's accounting.
 *
 * @param path            tmp file the store was flushed to
 * @param logCacheFlushId sequence id of this cache flush (logged only)
 * @param status          task monitor updated as the commit progresses
 * @return the committed, reader-opened store file
 * @throws IOException on commit or reopen failure
 */
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
        throws IOException {
    // Write-out finished successfully; promote the tmp file into the store directory.
    final Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path);

    status.setStatus("Flushing " + this + ": reopening flushed file");
    final StoreFile flushedFile = createStoreFileAndReader(committedPath);

    // Account for the new file's on-disk and uncompressed sizes.
    final StoreFile.Reader reader = flushedFile.getReader();
    this.storeSize += reader.length();
    this.totalUncompressedBytes += reader.getTotalUncompressedBytes();

    if (LOG.isInfoEnabled()) {
        LOG.info("Added " + flushedFile + ", entries=" + reader.getEntries() +
                ", sequenceid=" + logCacheFlushId +
                ", filesize=" + StringUtils.humanReadableInt(reader.length()));
    }
    return flushedFile;
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:20,代码来源:HStore.java

示例7: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Closes this HRegion: flushes the cache (unless {@code abort} is set), shuts down each
 * HStore, and stops servicing further calls.
 *
 * This can take a while, so do not call it from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return map of all storage files (HStoreFile objects) used by this region's component
 *         HStores; can be null if we are not to close at this time or are already closed
 * @throws IOException e
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only one close may run at a time; concurrent callers block on closeLock below.
  final String reason = abort ? " due to abort" : "";
  final MonitoredTask closeStatus =
      TaskMonitor.get().createStatus("Closing region " + this + reason);

  closeStatus.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, closeStatus);
    }
  } finally {
    // Release the monitored task whether or not doClose succeeded.
    closeStatus.cleanup();
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:31,代码来源:HRegion.java

示例8: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Closes this HRegion: flushes the cache (unless {@code abort} is set), shuts down each
 * HStore, and stops servicing further calls.
 *
 * This can take a while, so do not call it from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return list of all storage files (HStoreFile objects) used by this region's component
 *         HStores; can be null if we are not to close at this time or are already closed
 * @throws IOException e
 */
public List<StoreFile> close(final boolean abort) throws IOException {
  // Serialize closers: only one thread may run the close sequence at a time.
  final String reason = abort ? " due to abort" : "";
  final MonitoredTask closeStatus =
      TaskMonitor.get().createStatus("Closing region " + this + reason);

  closeStatus.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, closeStatus);
    }
  } finally {
    // Always detach the monitored task, even if doClose threw.
    closeStatus.cleanup();
  }
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:31,代码来源:HRegion.java

示例9: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Moves a successfully written tmp flush file into its final store location and
 * reopens it as a StoreFile, folding its sizes into the store's accounting.
 *
 * @param path            tmp file the store was flushed to
 * @param logCacheFlushId sequence id of this cache flush (logged only)
 * @param status          task monitor updated as the commit progresses
 * @return the committed, reader-opened store file
 * @throws IOException on commit or reopen failure
 */
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; promote the tmp file into the store directory.
  final Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  final StoreFile flushedFile = createStoreFileAndReader(committedPath);

  // Account for the new file's on-disk and uncompressed sizes.
  final StoreFile.Reader reader = flushedFile.getReader();
  this.storeSize += reader.length();
  this.totalUncompressedBytes += reader.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + flushedFile + ", entries=" + reader.getEntries() +
      ", sequenceid=" + logCacheFlushId +
      ", filesize=" + StringUtils.humanReadableInt(reader.length()));
  }
  return flushedFile;
}
 
开发者ID:shenli-uiuc,项目名称:PyroDB,代码行数:20,代码来源:HStore.java

示例10: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Moves a successfully written tmp flush file into its final store location and
 * reopens it as an HStoreFile, folding its sizes into the store's accounting.
 *
 * @param path            the pathname of the tmp file into which the store was flushed
 * @param logCacheFlushId sequence id of this cache flush (logged only)
 * @param status          task monitor updated as the commit progresses
 * @return store file created
 * @throws IOException on commit or reopen failure
 */
private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; promote the tmp file into the store directory.
  final Path committedPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  final HStoreFile flushedFile = createStoreFileAndReader(committedPath);

  // Atomically fold the new file's on-disk and uncompressed sizes into store totals.
  final StoreFileReader reader = flushedFile.getReader();
  this.storeSize.addAndGet(reader.length());
  this.totalUncompressedBytes.addAndGet(reader.getTotalUncompressedBytes());

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + flushedFile + ", entries=" + reader.getEntries() +
      ", sequenceid=" + logCacheFlushId +
      ", filesize=" + TraditionalBinaryPrefix.long2String(reader.length(), "", 1));
  }
  return flushedFile;
}
 
开发者ID:apache,项目名称:hbase,代码行数:27,代码来源:HStore.java

示例11: waitTasks

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Blocks until every task in the batch has finished (done + error == installed),
 * the stopper requests shutdown, or the thread is interrupted.
 *
 * Waits in 100 ms slices on the batch monitor so status and the stop flag are
 * re-checked regularly; wait(100) also tolerates spurious wakeups since the
 * completion condition is re-evaluated each loop.
 */
private void waitTasks(TaskBatch batch, MonitoredTask status) {
  synchronized (batch) {
    while (batch.done + batch.error != batch.installed) {
      try {
        status.setStatus("Waiting for distributed tasks to finish. "
            + " scheduled=" + batch.installed
            + " done=" + batch.done
            + " error=" + batch.error);
        batch.wait(100);
        if (stopper.isStopped()) {
          // Server is shutting down; abandon the wait without draining tasks.
          LOG.warn("Stopped while waiting for log splits to be completed");
          return;
        }
      } catch (InterruptedException e) {
        // Restore the interrupt flag for callers and bail out.
        LOG.warn("Interrupted while waiting for log splits to be completed");
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
 
开发者ID:lifeng5042,项目名称:RStore,代码行数:22,代码来源:SplitLogManager.java

示例12: waitForSplittingCompletion

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Blocks until every split task in the batch has finished (done + error == installed),
 * the coordination layer reports no tasks left, the stopper requests shutdown, or the
 * thread is interrupted.
 *
 * On each 100 ms wait slice it cross-checks three task counts — the batch's own
 * bookkeeping, the local active-task map, and the coordination layer — and logs a
 * warning when they disagree.
 */
private void waitForSplittingCompletion(TaskBatch batch, MonitoredTask status) {
  synchronized (batch) {
    while (batch.done + batch.error != batch.installed) {
      try {
        status.setStatus("Waiting for distributed tasks to finish. " + " scheduled="
            + batch.installed + " done=" + batch.done + " error=" + batch.error);
        // Cross-check the batch's arithmetic against the live task map.
        final int expectedActive = batch.installed - (batch.done + batch.error);
        final int mapActive = activeTasks(batch);
        if (expectedActive != mapActive) {
          LOG.warn("Expected " + expectedActive + " active tasks, but actually there are "
              + mapActive);
        }
        // Also compare against what the coordination layer still knows about.
        final int coordinationRemaining =
            ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
                .getSplitLogManagerCoordination().remainingTasksInCoordination();
        if (coordinationRemaining >= 0 && mapActive > coordinationRemaining) {
          LOG.warn("Expected at least" + mapActive + " tasks remaining, but actually there are "
              + coordinationRemaining);
        }
        if (coordinationRemaining == 0 || mapActive == 0) {
          LOG.warn("No more task remaining, splitting "
              + "should have completed. Remaining tasks is " + coordinationRemaining
              + ", active tasks in map " + mapActive);
          if (coordinationRemaining == 0 && mapActive == 0) {
            // Both views agree everything is done; exit even if batch counters lag.
            return;
          }
        }
        batch.wait(100);
        if (stopper.isStopped()) {
          LOG.warn("Stopped while waiting for log splits to be completed");
          return;
        }
      } catch (InterruptedException e) {
        // Restore the interrupt flag for callers and bail out.
        LOG.warn("Interrupted while waiting for log splits to be completed");
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:40,代码来源:SplitLogManager.java

示例13: initializeWarmup

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Warms up this region by initializing all of its HStores, tracking progress
 * through a dedicated monitored task.
 *
 * @param reporter progress callback passed through to store initialization
 * @throws IOException if store initialization fails
 */
private void initializeWarmup(final CancelableProgressable reporter) throws IOException {
  final MonitoredTask warmupStatus =
      TaskMonitor.get().createStatus("Initializing region " + this);
  warmupStatus.setStatus("Warming up all the Stores");
  try {
    initializeStores(reporter, warmupStatus);
  } finally {
    // Mark the task finished even if store initialization threw.
    warmupStatus.markComplete("Done warming up.");
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:11,代码来源:HRegion.java

示例14: finalizeWriter

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Finalizes a flushed store file: appends metadata (including the flush sequence
 * number this hfile is current up to) and closes the writer.
 *
 * @param writer           writer over the just-flushed hfile
 * @param cacheFlushSeqNum log sequence number corresponding to this output
 * @param status           task monitor updated as finalization progresses
 * @throws IOException if appending metadata or closing the writer fails
 */
protected void finalizeWriter(StoreFile.Writer writer, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  status.setStatus("Flushing " + store + ": appending metadata");
  // NOTE(review): the second argument presumably flags major-compaction output;
  // confirm against StoreFile.Writer.appendMetadata.
  writer.appendMetadata(cacheFlushSeqNum, false);
  status.setStatus("Flushing " + store + ": closing flushed file");
  writer.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:11,代码来源:StoreFlusher.java

示例15: waitForSplittingCompletion

import org.apache.hadoop.hbase.monitoring.MonitoredTask; //导入方法依赖的package包/类
/**
 * Blocks until every split task in the batch has finished (done + error == installed),
 * ZK reports no tasks left, the stopper requests shutdown, or the thread is interrupted.
 *
 * On each 100 ms wait slice it cross-checks three task counts — the batch's own
 * bookkeeping, the local active-task map, and ZK — and logs a warning when they
 * disagree.
 */
private void waitForSplittingCompletion(TaskBatch batch, MonitoredTask status) {
  synchronized (batch) {
    while (batch.done + batch.error != batch.installed) {
      try {
        status.setStatus("Waiting for distributed tasks to finish. "
            + " scheduled=" + batch.installed
            + " done=" + batch.done
            + " error=" + batch.error);
        // Cross-check the batch's arithmetic against the live task map.
        final int expectedActive = batch.installed - (batch.done + batch.error);
        final int mapActive = activeTasks(batch);
        if (expectedActive != mapActive) {
          LOG.warn("Expected " + expectedActive
            + " active tasks, but actually there are " + mapActive);
        }
        // Also compare against what ZK still knows about.
        final int zkRemaining = remainingTasksInZK();
        if (zkRemaining >= 0 && mapActive > zkRemaining) {
          LOG.warn("Expected at least" + mapActive
            + " tasks in ZK, but actually there are " + zkRemaining);
        }
        if (zkRemaining == 0 || mapActive == 0) {
          LOG.warn("No more task remaining (ZK or task map), splitting "
            + "should have completed. Remaining tasks in ZK " + zkRemaining
            + ", active tasks in map " + mapActive);
          if (zkRemaining == 0 && mapActive == 0) {
            // Both views agree everything is done; exit even if batch counters lag.
            return;
          }
        }
        batch.wait(100);
        if (stopper.isStopped()) {
          LOG.warn("Stopped while waiting for log splits to be completed");
          return;
        }
      } catch (InterruptedException e) {
        // Restore the interrupt flag for callers and bail out.
        LOG.warn("Interrupted while waiting for log splits to be completed");
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:41,代码来源:SplitLogManager.java


注:本文中的org.apache.hadoop.hbase.monitoring.MonitoredTask.setStatus方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。