

Java EnvironmentEdgeManager.currentTime Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime. If you are wondering what EnvironmentEdgeManager.currentTime is for, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples from the enclosing class, org.apache.hadoop.hbase.util.EnvironmentEdgeManager.


The following presents 15 code examples of the EnvironmentEdgeManager.currentTime method, sorted by popularity by default.
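Before diving into the examples, it helps to know what the method does: EnvironmentEdgeManager.currentTime() is HBase's substitute for System.currentTimeMillis(). It delegates to a pluggable EnvironmentEdge, so production code reads the wall clock while tests can inject a controllable clock. The sketch below is not taken from the projects cited in this article; the demo class name is ours, and ManualEnvironmentEdge ships with HBase's test utilities.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class CurrentTimeDemo {
  public static void main(String[] args) {
    // Production path: the default edge simply reads the wall clock.
    long wallClockMs = EnvironmentEdgeManager.currentTime();
    System.out.println("wall clock: " + wallClockMs + " ms");

    // Test path: inject a manual edge so time advances only on demand.
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1000L);
    EnvironmentEdgeManager.injectEdge(clock);
    try {
      System.out.println(EnvironmentEdgeManager.currentTime()); // prints 1000
      clock.incValue(500L); // advance the injected clock by 500 ms
      System.out.println(EnvironmentEdgeManager.currentTime()); // prints 1500
    } finally {
      EnvironmentEdgeManager.reset(); // restore the real clock
    }
  }
}

This is why nearly every example below calls EnvironmentEdgeManager.currentTime() instead of System.currentTimeMillis(): TTL checks, retry deadlines, and flush intervals all become deterministic under test.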

Example 1: removeExpiredKeys

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
synchronized void removeExpiredKeys() {
  if (!leaderElector.isMaster()) {
    LOG.info("Skipping removeExpiredKeys() because not running as master.");
    return;
  }

  long now = EnvironmentEdgeManager.currentTime();
  Iterator<AuthenticationKey> iter = allKeys.values().iterator();
  while (iter.hasNext()) {
    AuthenticationKey key = iter.next();
    if (key.getExpiration() < now) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing expired key "+key.getKeyId());
      }
      iter.remove();
      zkWatcher.removeKeyFromZK(key);
    }
  }
}
 
Developer: fengchen8086; Project: ditb; Lines: 20; Source: AuthenticationTokenSecretManager.java

Example 2: run

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
public void run() {
  zkLeader.start();
  zkLeader.waitToBecomeLeader();
  isMaster = true;

  while (!stopped) {
    long now = EnvironmentEdgeManager.currentTime();

    // clear any expired keys
    removeExpiredKeys();
    long localLastKeyUpdate = getLastKeyUpdate();
    if (localLastKeyUpdate + keyUpdateInterval < now) {
      // roll a new master key
      rollCurrentKey();
    }

    try {
      Thread.sleep(5000);
    } catch (InterruptedException ie) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Interrupted waiting for next update", ie);
      }
    }
  }
}
 
Developer: fengchen8086; Project: ditb; Lines: 26; Source: AuthenticationTokenSecretManager.java

Example 3: isFileDeletable

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Override
public boolean isFileDeletable(FileStatus fStat) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  long time = fStat.getModificationTime();
  long life = currentTime - time;
  if (LOG.isTraceEnabled()) {
    LOG.trace("HFile life:" + life + ", ttl:" + ttl + ", current:" + currentTime + ", from: "
        + time);
  }
  if (life < 0) {
    LOG.warn("Found a hfile (" + fStat.getPath() + ") newer than current time (" + currentTime
        + " < " + time + "), probably a clock skew");
    return false;
  }
  return life > ttl;
}
 
Developer: fengchen8086; Project: ditb; Lines: 17; Source: TimeToLiveHFileCleaner.java

Example 4: StoreScanner

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * An internal constructor.
 */
protected StoreScanner(Store store, Scan scan, final ScanInfo scanInfo,
    final NavigableSet<byte[]> columns, long readPt, boolean cacheBlocks) {
  this.readPt = readPt;
  this.store = store;
  this.cacheBlocks = cacheBlocks;
  get = scan.isGetScan();
  int numCol = columns == null ? 0 : columns.size();
  explicitColumnQuery = numCol > 0;
  this.scan = scan;
  this.columns = columns;
  this.now = EnvironmentEdgeManager.currentTime();
  this.oldestUnexpiredTS = now - scanInfo.getTtl();
  this.minVersions = scanInfo.getMinVersions();

  // We look up row-column Bloom filters for multi-column queries as part of
  // the seek operation. However, we also look the row-column Bloom filter
  // for multi-row (non-"get") scans because this is not done in
  // StoreFile.passesBloomFilter(Scan, SortedSet<byte[]>).
  this.useRowColBloom = numCol > 1 || (!get && numCol == 1);

  this.maxRowSize = scanInfo.getTableMaxRowSize();
  this.scanUsePread = scan.isSmall() ? true : scanInfo.isUsePread();
  this.cellsPerHeartbeatCheck = scanInfo.getCellsPerTimeoutCheck();
  // Parallel seeking is on if the config allows it and there is more than one store file.
  if (this.store != null && this.store.getStorefilesCount() > 1) {
    RegionServerServices rsService = ((HStore) store).getHRegion().getRegionServerServices();
    if (rsService != null && scanInfo.isParallelSeekEnabled()) {
      this.parallelSeekEnabled = true;
      this.executor = rsService.getExecutorService();
    }
  }
}
 
Developer: fengchen8086; Project: ditb; Lines: 36; Source: StoreScanner.java

Example 5: setTimeoutFailure

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@InterfaceAudience.Private
protected synchronized boolean setTimeoutFailure() {
  if (state == ProcedureState.WAITING_TIMEOUT) {
    long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
    setFailure("ProcedureExecutor", new TimeoutIOException(
      "Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
    return true;
  }
  return false;
}
 
Developer: fengchen8086; Project: ditb; Lines: 11; Source: Procedure.java

Example 6: setProcId

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Called by the ProcedureExecutor to assign the ID to the newly created procedure.
 */
@VisibleForTesting
@InterfaceAudience.Private
protected void setProcId(final long procId) {
  this.procId = procId;
  this.startTime = EnvironmentEdgeManager.currentTime();
  setState(ProcedureState.RUNNABLE);
}
 
Developer: fengchen8086; Project: ditb; Lines: 11; Source: Procedure.java

Example 7: progress

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Override
public boolean progress() {
  long now = EnvironmentEdgeManager.currentTime();
  if (now - lastLog > this.interval) {
    LOG.info("Opening " + this.hri.getRegionNameAsString());
    this.lastLog = now;
  }
  return true;
}
 
Developer: fengchen8086; Project: ditb; Lines: 10; Source: SplitTransactionImpl.java

Example 8: getData

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Get the maintained data. In case of any ZK exceptions this will retry
 * establishing the connection (but not more than twice/minute).
 *
 * getData is on the critical path, so make sure it is fast unless there is
 * a problem (network partition, ZK ensemble down, etc.).
 * Make sure at most one (unlucky) thread retries and other threads don't pile up
 * while that thread tries to recreate the connection.
 *
 * @return the last known version of the data
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION")
public byte[] getData() {
  // try at most twice/minute
  if (needSetup && EnvironmentEdgeManager.currentTime() > lastSetupTry + 30000) {
    synchronized (this) {
      // make sure only one thread tries to reconnect
      if (needSetup) {
        needSetup = false;
      } else {
        return data;
      }
    }
    // do this without the lock held to avoid threads piling up on this lock,
    // as it can take a while
    try {
      LOG.debug("Connecting to ZK");
      // record this attempt
      lastSetupTry = EnvironmentEdgeManager.currentTime();
      if (zk.exists(node, false) != null) {
        data = zk.getData(node, this, null);
        LOG.debug("Read synchronously: "+(data == null ? "null" : Bytes.toLong(data)));
      } else {
        zk.exists(node, this);
      }
    } catch (Exception x) {
      // try again if this fails
      needSetup = true;
    }
  }
  return data;
}
 
Developer: fengchen8086; Project: ditb; Lines: 43; Source: ZooKeeperScanPolicyObserver.java
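The double-checked needSetup guard in this example deserves a closer look: the unsynchronized read keeps getData() cheap on the hot path, and the synchronized block guarantees that at most one thread claims the reconnect attempt while the others fall through with the cached data. Here is a minimal, standalone sketch of the same pattern using only the JDK (the class and its connect() method are hypothetical, for illustration):

public class ThrottledReconnect {
  private volatile boolean needSetup = true;
  private volatile long lastSetupTry = 0L;
  private static final long RETRY_INTERVAL_MS = 30_000L; // at most twice per minute

  public void maybeReconnect() {
    if (needSetup && System.currentTimeMillis() > lastSetupTry + RETRY_INTERVAL_MS) {
      synchronized (this) {
        if (!needSetup) {
          return; // another thread already claimed this retry
        }
        needSetup = false;
      }
      // Do the slow work outside the lock so other threads don't pile up.
      lastSetupTry = System.currentTimeMillis();
      try {
        connect(); // hypothetical expensive operation
      } catch (Exception e) {
        needSetup = true; // allow a later retry
      }
    }
  }

  private void connect() throws Exception { /* establish the connection */ }
}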

Example 9: shouldFlush

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Should the memstore be flushed now?
 */
boolean shouldFlush(final StringBuffer whyFlush) {
  whyFlush.setLength(0);
  // This is a rough measure.
  if (this.maxFlushedSeqId > 0 && (this.maxFlushedSeqId + this.flushPerChanges < this.mvcc
      .getReadPoint())) {
    whyFlush.append("more than max edits, " + this.flushPerChanges + ", since last flush");
    return true;
  }
  long modifiedFlushCheckInterval = flushCheckInterval;
  if (getRegionInfo().isSystemTable()
      && getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
    modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL;
  }
  if (modifiedFlushCheckInterval <= 0) { // disabled
    return false;
  }
  long now = EnvironmentEdgeManager.currentTime();
  // if we flushed in the recent past, we don't need to do so again now
  if ((now - getEarliestFlushTimeForAllStores() < modifiedFlushCheckInterval)) {
    return false;
  }
  // since we didn't flush in the recent past, flush now if certain conditions
  // are met. Return true on first such memstore hit.
  for (Store s : getStores()) {
    if (s.timeOfOldestEdit() < now - modifiedFlushCheckInterval) {
      // we have an old enough edit in the memstore, flush
      whyFlush.append(s.toString() + " has an old edit so flush to free WALs");
      return true;
    }
  }
  return false;
}
 
Developer: fengchen8086; Project: ditb; Lines: 36; Source: HRegion.java

Example 10: getWaitInterval

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Override
public long getWaitInterval(long limit, long available, long amount) {
  if (nextRefillTime == -1) {
    return 0;
  }
  final long now = EnvironmentEdgeManager.currentTime();
  final long refillTime = nextRefillTime;
  return refillTime - now;
}
 
Developer: fengchen8086; Project: ditb; Lines: 10; Source: FixedIntervalRateLimiter.java

Example 11: splitRegion

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Split a region on the region server.
 *
 * @param controller the RPC controller
 * @param request the request
 * @throws ServiceException
 */
@Override
@QosPriority(priority=HConstants.ADMIN_QOS)
public SplitRegionResponse splitRegion(final RpcController controller,
    final SplitRegionRequest request) throws ServiceException {
  try {
    checkOpen();
    requestCount.increment();
    Region region = getRegion(request.getRegion());
    region.startRegionOperation(Operation.SPLIT_REGION);
    if (region.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
      throw new IOException("Can't split replicas directly. "
          + "Replicas are auto-split when their primary is split.");
    }
    LOG.info("Splitting " + region.getRegionInfo().getRegionNameAsString());
    long startTime = EnvironmentEdgeManager.currentTime();
    FlushResult flushResult = region.flush(true);
    if (flushResult.isFlushSucceeded()) {
      long endTime = EnvironmentEdgeManager.currentTime();
      regionServer.metricsRegionServer.updateFlushTime(endTime - startTime);
    }
    byte[] splitPoint = null;
    if (request.hasSplitPoint()) {
      splitPoint = request.getSplitPoint().toByteArray();
    }
    ((HRegion)region).forceSplit(splitPoint);
    regionServer.compactSplitThread.requestSplit(region, ((HRegion)region).checkSplit(),
      RpcServer.getRequestUser());
    return SplitRegionResponse.newBuilder().build();
  } catch (DroppedSnapshotException ex) {
    regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
    throw new ServiceException(ex);
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
 
Developer: fengchen8086; Project: ditb; Lines: 43; Source: RSRpcServices.java

Example 12: JournalEntryImpl

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
public JournalEntryImpl(RegionMergeTransactionPhase type) {
  this(type, EnvironmentEdgeManager.currentTime());
}
 
Developer: fengchen8086; Project: ditb; Lines: 4; Source: RegionMergeTransactionImpl.java

Example 13: assertBulkLoadHFileOk

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Override public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
  HFile.Reader reader = null;
  try {
    LOG.info(
        "Validating hfile at " + srcPath + " for inclusion in " + "store " + this + " region "
            + this.getRegionInfo().getRegionNameAsString());
    reader = HFile.createReader(srcPath.getFileSystem(conf), srcPath, cacheConf, conf);
    reader.loadFileInfo();

    byte[] firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey != null, "First key can not be null");
    byte[] lk = reader.getLastKey();
    Preconditions.checkState(lk != null, "Last key can not be null");
    byte[] lastKey = KeyValue.createKeyValueFromKey(lk).getRow();

    LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) + " last=" + Bytes
        .toStringBinary(lastKey));
    LOG.debug(
        "Region bounds: first=" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + " last="
            + Bytes.toStringBinary(getRegionInfo().getEndKey()));

    if (!this.getRegionInfo().containsRange(firstKey, lastKey)) {
      throw new WrongRegionException(
          "Bulk load file " + srcPath.toString() + " does not fit inside region " + this
              .getRegionInfo().getRegionNameAsString());
    }

    if (reader.length() > conf
        .getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) {
      LOG.warn(
          "Trying to bulk load hfile " + srcPath.toString() + " with size: " + reader.length()
              + " bytes can be problematic as it may lead to oversplitting.");
    }

    if (verifyBulkLoads) {
      long verificationStartTime = EnvironmentEdgeManager.currentTime();
      LOG.info("Full verification started for bulk load hfile: " + srcPath.toString());
      Cell prevCell = null;
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        Cell cell = scanner.getKeyValue();
        if (prevCell != null) {
          if (CellComparator.compareRows(prevCell, cell) > 0) {
            throw new InvalidHFileException(
                "Previous row is greater than" + " current row: path=" + srcPath + " previous="
                    + CellUtil.getCellKeyAsString(prevCell) + " current=" + CellUtil
                    .getCellKeyAsString(cell));
          }
          if (CellComparator.compareFamilies(prevCell, cell) != 0) {
            throw new InvalidHFileException(
                "Previous key had different" + " family compared to current key: path=" + srcPath
                    + " previous=" + Bytes
                    .toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(),
                        prevCell.getFamilyLength()) + " current=" + Bytes
                    .toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
                        cell.getFamilyLength()));
          }
        }
        prevCell = cell;
      } while (scanner.next());
      LOG.info(
          "Full verification complete for bulk load hfile: " + srcPath.toString() + " took " + (
              EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
    }
  } finally {
    if (reader != null) reader.close();
  }
}
 
Developer: fengchen8086; Project: ditb; Lines: 70; Source: HStore.java

Example 14: getIOHitsPerSecond

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
public long getIOHitsPerSecond() {
  long now = EnvironmentEdgeManager.currentTime();
  long took = (now - lastLogTime) / 1000;
  lastLogTime = now;
  return took == 0? 0: ioHitCount.get() / took;
}
 
Developer: fengchen8086; Project: ditb; Lines: 7; Source: BucketCacheStats.java

Example 15: canRetryMore

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * We stop retrying when we have exhausted BOTH the number of retries and the time allocated.
 */
boolean canRetryMore(int numRetry) {
  // If only a single try is configured, we must not take the time into account.
  return numRetry < maxRetries || (maxRetries > 1 &&
      EnvironmentEdgeManager.currentTime() < this.canRetryUntil);
}
 
Developer: fengchen8086; Project: ditb; Lines: 9; Source: ConnectionManager.java
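The BOTH-conditions rule combines a retry counter with an absolute wall-clock deadline (canRetryUntil). The snippet above does not show how canRetryUntil is computed, so the sketch below supplies the time budget through a hypothetical constructor; only the canRetryMore logic mirrors the example:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RetryBudget {
  private final int maxRetries;
  private final long canRetryUntil; // absolute deadline, in ms

  public RetryBudget(int maxRetries, long timeBudgetMs) {
    this.maxRetries = maxRetries;
    this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeBudgetMs;
  }

  /** Mirrors the rule above: a single-try configuration ignores the time budget. */
  boolean canRetryMore(int numRetry) {
    return numRetry < maxRetries
        || (maxRetries > 1 && EnvironmentEdgeManager.currentTime() < canRetryUntil);
  }
}

A caller would loop while canRetryMore(attempt) holds, sleeping between attempts, so a slow cluster exhausts the time budget even when the retry count has not yet run out.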


Note: The org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.