This page collects typical usage examples of the Java class org.apache.hadoop.hbase.util.EnvironmentEdgeManager. If you are unsure what EnvironmentEdgeManager is for or how to use it, the curated code examples below may help.
The EnvironmentEdgeManager class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, ordered by popularity.
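Before the examples, a minimal sketch of the core idea may help: EnvironmentEdgeManager routes time lookups through a replaceable "environment edge", so tests can swap in a manual clock instead of the system clock. The sketch below uses only APIs that also appear in Example 15 (injectEdge, currentTime, reset, and ManualEnvironmentEdge's setValue/incValue); the class name ClockInjectionSketch is illustrative.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class ClockInjectionSketch {
  public static void main(String[] args) {
    // Production code asks the manager for the time instead of System.currentTimeMillis().
    long systemNow = EnvironmentEdgeManager.currentTime();

    // A test can inject a manual edge so that "now" is fully under its control.
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(clock);
    clock.setValue(1000L);   // pretend it is t = 1000 ms
    assert EnvironmentEdgeManager.currentTime() == 1000L;
    clock.incValue(500L);    // advance the clock by 500 ms without sleeping
    assert EnvironmentEdgeManager.currentTime() == 1500L;

    // Restore the default system-clock edge when done.
    EnvironmentEdgeManager.reset();
  }
}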
Example 1: checkAndSetTimeout
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
/**
 * Check whether the call has timed out. If so, set an exception (which includes a notify).
 * @return true if the call has timed out, false otherwise.
 */
public boolean checkAndSetTimeout() {
  if (timeout == 0) {
    return false;
  }
  long waitTime = EnvironmentEdgeManager.currentTime() - getStartTime();
  if (waitTime >= timeout) {
    IOException ie = new CallTimeoutException("Call id=" + id +
        ", waitTime=" + waitTime + ", operationTimeout=" + timeout + " expired.");
    setException(ie); // includes a notify
    return true;
  } else {
    return false;
  }
}
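The pattern above (elapsed = currentTime() - start, compared against a timeout) can be exercised deterministically with an injected manual clock. Below is a hedged, self-contained sketch; TimeoutCheckSketch and its fields are simplified stand-ins for the real HBase ipc Call class, not its actual API.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class TimeoutCheckSketch {
  private final long startTime = EnvironmentEdgeManager.currentTime();
  private final long timeout;   // in ms; 0 disables the check, as in Example 1

  TimeoutCheckSketch(long timeout) { this.timeout = timeout; }

  boolean isTimedOut() {
    if (timeout == 0) return false;
    return EnvironmentEdgeManager.currentTime() - startTime >= timeout;
  }

  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(clock);
    clock.setValue(0L);
    TimeoutCheckSketch call = new TimeoutCheckSketch(100);
    clock.incValue(99L);
    assert !call.isTimedOut();   // 99 ms elapsed, still under the 100 ms timeout
    clock.incValue(1L);
    assert call.isTimedOut();    // exactly at the boundary: waitTime >= timeout
    EnvironmentEdgeManager.reset();
  }
}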
Example 2: removeExpiredKeys
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
synchronized void removeExpiredKeys() {
  if (!leaderElector.isMaster()) {
    LOG.info("Skipping removeExpiredKeys() because not running as master.");
    return;
  }
  long now = EnvironmentEdgeManager.currentTime();
  Iterator<AuthenticationKey> iter = allKeys.values().iterator();
  while (iter.hasNext()) {
    AuthenticationKey key = iter.next();
    if (key.getExpiration() < now) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing expired key " + key.getKeyId());
      }
      iter.remove();
      zkWatcher.removeKeyFromZK(key);
    }
  }
}
Example 3: rollCurrentKey
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
synchronized void rollCurrentKey() {
  if (!leaderElector.isMaster()) {
    LOG.info("Skipping rollCurrentKey() because not running as master.");
    return;
  }
  long now = EnvironmentEdgeManager.currentTime();
  AuthenticationKey prev = currentKey;
  AuthenticationKey newKey = new AuthenticationKey(++idSeq,
      Long.MAX_VALUE, // don't allow it to expire until it's replaced by a new key
      generateSecret());
  allKeys.put(newKey.getKeyId(), newKey);
  currentKey = newKey;
  zkWatcher.addKeyToZK(newKey);
  lastKeyUpdate = now;
  if (prev != null) {
    // make sure the previous key is still stored
    prev.setExpiration(now + tokenMaxLifetime);
    allKeys.put(prev.getKeyId(), prev);
    zkWatcher.updateKeyInZK(prev);
  }
}
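A design note on the rotation above: the replaced key is not deleted when it is rotated out. Its expiration is instead pushed out by tokenMaxLifetime, so tokens already signed with it remain verifiable until they would have expired anyway; removeExpiredKeys() (Example 2) then reaps it once that window has passed.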
Example 4: heartbeat
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
private void heartbeat(String path, int new_version, ServerName workerName) {
  Task task = findOrCreateOrphanTask(path);
  if (new_version != task.last_version) {
    if (task.isUnassigned()) {
      LOG.info("task " + path + " acquired by " + workerName);
    }
    task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName);
    SplitLogCounters.tot_mgr_heartbeat.incrementAndGet();
  } else {
    // Duplicate heartbeats (heartbeats without the zk node version changing) are
    // possible: the timeout thread calls getDataSetWatch() just to check whether
    // a node still exists.
  }
}
Example 5: chore
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "GC_UNRELATED_TYPES",
    justification = "I do not understand why the complaints, it looks good to me -- FIX")
protected void chore() {
  // Prefetch online tables/namespaces
  for (TableName table : QuotaCache.this.rsServices.getOnlineTables()) {
    if (table.isSystemTable()) continue;
    if (!QuotaCache.this.tableQuotaCache.contains(table)) {
      QuotaCache.this.tableQuotaCache.putIfAbsent(table, new QuotaState());
    }
    String ns = table.getNamespaceAsString();
    if (!QuotaCache.this.namespaceQuotaCache.contains(ns)) {
      QuotaCache.this.namespaceQuotaCache.putIfAbsent(ns, new QuotaState());
    }
  }
  fetchNamespaceQuotaState();
  fetchTableQuotaState();
  fetchUserQuotaState();
  lastUpdate = EnvironmentEdgeManager.currentTime();
}
Example 6: refill
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
@Override
public long refill(long limit) {
  final long now = EnvironmentEdgeManager.currentTime();
  if (nextRefillTime == -1) {
    // No resource has been consumed yet.
    nextRefillTime = EnvironmentEdgeManager.currentTime();
    return limit;
  }
  long delta = (limit * (now - nextRefillTime)) / super.getTimeUnitInMillis();
  if (delta > 0) {
    this.nextRefillTime = now;
    return Math.min(limit, delta);
  }
  return 0;
}
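The refill amount is proportional to elapsed time: delta = limit * (now - nextRefillTime) / timeUnitMs, capped at the limit. For example, with limit = 100 per second and 500 ms elapsed, delta = 100 * 500 / 1000 = 50. A hedged standalone sketch of the same arithmetic (this class is illustrative and is not the real RateLimiter subclass):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class AverageIntervalRefillSketch {
  private long nextRefillTime = -1;    // -1 means nothing has been consumed yet
  private final long timeUnitMs = 1000L;

  long refill(long limit) {
    long now = EnvironmentEdgeManager.currentTime();
    if (nextRefillTime == -1) {
      // First use: grant the full limit and start the window now.
      nextRefillTime = now;
      return limit;
    }
    // Grant resources proportional to the time elapsed, capped at the limit.
    // e.g. limit = 100/sec and 500 ms elapsed -> delta = 100 * 500 / 1000 = 50.
    long delta = (limit * (now - nextRefillTime)) / timeUnitMs;
    if (delta > 0) {
      nextRefillTime = now;
      return Math.min(limit, delta);
    }
    return 0;
  }
}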
Example 7: createTableLock
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
private InterProcessLock createTableLock() {
  String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode,
      tableName.getNameAsString());
  ZooKeeperProtos.TableLock data = ZooKeeperProtos.TableLock.newBuilder()
      .setTableName(ProtobufUtil.toProtoTableName(tableName))
      .setLockOwner(ProtobufUtil.toServerName(serverName))
      .setThreadId(Thread.currentThread().getId())
      .setPurpose(purpose)
      .setIsShared(isShared)
      .setCreateTime(EnvironmentEdgeManager.currentTime()).build();
  byte[] lockMetadata = toBytes(data);
  InterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(zkWatcher, tableLockZNode,
      METADATA_HANDLER);
  return isShared ? lock.readLock(lockMetadata) : lock.writeLock(lockMetadata);
}
Example 8: splitLog
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
/**
 * This method is the base split method that splits WAL files matching a filter. Callers should
 * pass the appropriate filter for meta and non-meta WALs.
 * @param serverNames logs belonging to these servers will be split; this will rename the log
 *                    directory out from under a soft-failed server
 * @param filter only WALs accepted by this filter will be split
 * @throws IOException if the distributed log splitting fails
 */
public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException {
  long splitTime = 0, splitLogSize = 0;
  List<Path> logDirs = getLogDirs(serverNames);
  splitLogManager.handleDeadWorkers(serverNames);
  splitTime = EnvironmentEdgeManager.currentTime();
  splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
  splitTime = EnvironmentEdgeManager.currentTime() - splitTime;
  if (this.metricsMasterFilesystem != null) {
    if (filter == META_FILTER) {
      this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
    } else {
      this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
    }
  }
}
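The timing idiom here (read currentTime() before and after the operation, then report the difference to a metrics sink) recurs throughout HBase. A minimal hedged sketch of that idiom; the timed helper and the metrics callback are hypothetical, not HBase APIs:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.util.function.LongConsumer;

public class ElapsedTimeSketch {
  // Run a task and report its wall-clock duration (in ms) to a metrics callback.
  static void timed(Runnable task, LongConsumer recordMillis) {
    long start = EnvironmentEdgeManager.currentTime();
    task.run();
    recordMillis.accept(EnvironmentEdgeManager.currentTime() - start);
  }

  public static void main(String[] args) {
    timed(() -> { /* the work being measured, e.g. a WAL split */ },
        ms -> System.out.println("split took " + ms + " ms"));
  }
}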
Example 9: waitUntilDone
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
@Override
protected boolean waitUntilDone(long timeout) throws InterruptedException {
  long startTime = EnvironmentEdgeManager.currentTime();
  long remaining = timeout;
  List<HRegionInfo> regions = null;
  long lastLogTime = startTime;
  while (!server.isStopped() && remaining > 0) {
    Thread.sleep(waitingTimeForEvents);
    regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
    long now = EnvironmentEdgeManager.currentTime();
    // Don't log more than once every ten seconds. It's obnoxious. And only log table regions
    // if we are waiting a while for them to go down...
    if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) {
      lastLogTime = now;
      LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
    }
    if (regions.isEmpty()) break;
    remaining = timeout - (now - startTime);
  }
  return regions != null && regions.isEmpty();
}
Example 10: waitFor
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
public static <T> T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents,
    String purpose, Predicate<T> predicate) throws IOException {
  final long done = EnvironmentEdgeManager.currentTime() + waitTime;
  do {
    T result = predicate.evaluate();
    if (result != null && !result.equals(Boolean.FALSE)) {
      return result;
    }
    try {
      Thread.sleep(waitingTimeForEvents);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while sleeping, waiting on " + purpose);
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    LOG.debug("Waiting on " + purpose);
  } while (EnvironmentEdgeManager.currentTime() < done && env.isRunning());
  throw new TimeoutIOException("Timed out while waiting on " + purpose);
}
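The deadline loop in waitFor (compute an absolute done timestamp up front, then poll until the predicate holds or currentTime() passes the deadline) generalizes beyond master procedures. A hedged standalone sketch of the same pattern, using java.util.function.BooleanSupplier in place of the HBase Predicate type so it is self-contained:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.util.function.BooleanSupplier;

public class DeadlineWaitSketch {
  /** Poll until the condition holds; return false if the deadline passes first. */
  static boolean waitFor(long waitTimeMs, long pollIntervalMs, BooleanSupplier condition)
      throws InterruptedException {
    final long deadline = EnvironmentEdgeManager.currentTime() + waitTimeMs;
    while (EnvironmentEdgeManager.currentTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(pollIntervalMs);
    }
    return false;
  }
}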
Example 11: waitOnRegionToClearRegionsInTransition
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
/**
 * Wait for a region to clear regions-in-transition, or time out.
 * @param hri the region to wait on
 * @param timeOut milliseconds to wait for the region to leave the transition state
 * @return true if the region clears regions-in-transition before the timeout, otherwise false
 * @throws InterruptedException if the wait is interrupted
 */
public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
    throws InterruptedException {
  if (!regionStates.isRegionInTransition(hri)) return true;
  long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTime()
      + timeOut;
  // There is already a timeout monitor on regions in transition, so I
  // should not have to have one here too?
  LOG.info("Waiting for " + hri.getEncodedName() +
      " to leave regions-in-transition, timeOut=" + timeOut + " ms.");
  while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
    regionStates.waitForUpdate(100);
    if (EnvironmentEdgeManager.currentTime() > end) {
      LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
      return false;
    }
  }
  if (this.server.isStopped()) {
    LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
    return false;
  }
  return true;
}
Example 12: archiveStoreFile
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
/**
 * Archive the store file.
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param fs the filesystem where the store files live
 * @param regionInfo region hosting the store files
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @param storeFile file to be archived
 * @throws IOException if the files could not be correctly disposed of.
 */
public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
    Path tableDir, byte[] family, Path storeFile) throws IOException {
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
  // make sure we don't archive if we can't, and that the archive dir exists
  if (!fs.mkdirs(storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }
  // do the actual archive
  long start = EnvironmentEdgeManager.currentTime();
  File file = new FileablePath(fs, storeFile);
  if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
    throw new IOException("Failed to archive/delete the file for region:"
        + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
Example 13: testCanExecuteOfAverageIntervalRateLimiter
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
@Test
public void testCanExecuteOfAverageIntervalRateLimiter() throws InterruptedException {
  RateLimiter limiter = new AverageIntervalRateLimiter();
  // when the limit is set to 100 per second, this AverageIntervalRateLimiter
  // allows at most 200 per second
  limiter.set(100, TimeUnit.SECONDS);
  limiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
  assertEquals(50, testCanExecuteByRate(limiter, 50));
  // refill the available resources up to the limit
  limiter.set(100, TimeUnit.SECONDS);
  limiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
  assertEquals(100, testCanExecuteByRate(limiter, 100));
  // refill the available resources up to the limit
  limiter.set(100, TimeUnit.SECONDS);
  limiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
  assertEquals(200, testCanExecuteByRate(limiter, 200));
  // refill the available resources up to the limit
  limiter.set(100, TimeUnit.SECONDS);
  limiter.setNextRefillTime(EnvironmentEdgeManager.currentTime());
  assertEquals(200, testCanExecuteByRate(limiter, 500));
}
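The 200-per-second ceiling noted in the test appears to follow from the refill arithmetic in Example 6: a window starts with up to limit available resources, and over the following second refill() can grant at most another limit, so a burst can consume at most 2 x limit before being throttled.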
Example 14: TimeoutExceptionInjector
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
/**
 * Create a generic timer for a task/process.
 * @param listener listener to notify if the process times out
 * @param maxTime max allowed running time for the process. The timer starts on calls to
 *                {@link #start()}
 */
public TimeoutExceptionInjector(final ForeignExceptionListener listener, final long maxTime) {
  this.maxTime = maxTime;
  timer = new Timer();
  timerTask = new TimerTask() {
    @Override
    public void run() {
      // ensure we don't run this task multiple times
      synchronized (this) {
        // quick exit if we already marked the task complete
        if (TimeoutExceptionInjector.this.complete) return;
        // mark the task as run, to avoid repeats
        TimeoutExceptionInjector.this.complete = true;
      }
      long end = EnvironmentEdgeManager.currentTime();
      TimeoutException tee = new TimeoutException(
          "Timeout caused Foreign Exception", start, end, maxTime);
      String source = "timer-" + timer;
      listener.receive(new ForeignException(source, tee));
    }
  };
}
Example 15: testSortExtract
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the required package/class
@Test
public void testSortExtract() {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(mee);
  mee.setValue(1);
  DeadServer d = new DeadServer();
  d.add(hostname123);
  mee.incValue(1);
  d.add(hostname1234);
  mee.incValue(1);
  d.add(hostname12345);
  List<Pair<ServerName, Long>> copy = d.copyDeadServersSince(2L);
  Assert.assertEquals(2, copy.size());
  Assert.assertEquals(hostname1234, copy.get(0).getFirst());
  Assert.assertEquals(Long.valueOf(2L), copy.get(0).getSecond());
  Assert.assertEquals(hostname12345, copy.get(1).getFirst());
  Assert.assertEquals(Long.valueOf(3L), copy.get(1).getSecond());
  EnvironmentEdgeManager.reset();
}