This article collects typical usage examples of the Java method org.apache.hadoop.util.Time.monotonicNow. If you are unsure what Time.monotonicNow does or how to call it, the curated code samples below should help; you can also explore the enclosing class, org.apache.hadoop.util.Time, for more context.
The following 15 code examples of Time.monotonicNow are drawn from open-source projects and ordered by popularity.
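Every example below follows the same basic pattern: take a start stamp, do work, subtract. Time.monotonicNow() wraps System.nanoTime() and reports milliseconds from a monotonic clock, so the differences stay correct even if the wall clock is stepped by NTP or an operator, which is exactly where System.currentTimeMillis() arithmetic goes wrong. A minimal, self-contained sketch of that pattern (the class name is ours; it assumes hadoop-common is on the classpath):

import org.apache.hadoop.util.Time;

public class MonotonicTimingDemo {
  public static void main(String[] args) throws InterruptedException {
    // Start stamp from the monotonic clock, in milliseconds.
    long start = Time.monotonicNow();

    Thread.sleep(250); // stand-in for real work

    // Interval arithmetic is immune to wall-clock adjustments.
    long elapsedMs = Time.monotonicNow() - start;
    System.out.println("Work took " + elapsedMs + " ms");
  }
}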
Example 1: getInternal
import org.apache.hadoop.util.Time; // import the package/class the method depends on
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }
  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        LOG.warn("got IOException closing stale peer " + peer +
            ", which is " + ageMs + " ms old");
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
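This peer cache is a good illustration of why ages are computed from the monotonic clock: had candidate.getTime() been a System.currentTimeMillis() stamp, a backwards wall-clock step could keep stale peers alive well past expiryPeriod, or prematurely expire freshly cached ones.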
Example 2: trimEvictionMaps
import org.apache.hadoop.util.Time; // import the package/class the method depends on
/**
 * Trim the eviction lists.
 */
private void trimEvictionMaps() {
  long now = Time.monotonicNow();
  demoteOldEvictableMmaped(now);
  while (true) {
    long evictableSize = evictable.size();
    long evictableMmappedSize = evictableMmapped.size();
    if (evictableSize + evictableMmappedSize <= maxTotalSize) {
      return;
    }
    ShortCircuitReplica replica;
    if (evictableSize == 0) {
      replica = evictableMmapped.firstEntry().getValue();
    } else {
      replica = evictable.firstEntry().getValue();
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": trimEvictionMaps is purging " + replica +
          StringUtils.getStackTrace(Thread.currentThread()));
    }
    purge(replica);
  }
}
Example 3: loadINodeDirSection
import org.apache.hadoop.util.Time; // import the package/class the method depends on
private void loadINodeDirSection(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf, List<Long> refIdList)
    throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(
              new LimitInputStream(fin, section.getLength())));
      buildNamespace(is, refIdList);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
Example 4: closeFile
import org.apache.hadoop.util.Time; // import the package/class the method depends on
/** Close a file. */
void closeFile(final long inodeId, final DFSClient dfsc) {
  dfsc.removeFileBeingWritten(inodeId);
  synchronized (this) {
    if (dfsc.isFilesBeingWrittenEmpty()) {
      dfsclients.remove(dfsc);
    }
    // update emptyTime if necessary
    if (emptyTime == Long.MAX_VALUE) {
      for (DFSClient c : dfsclients) {
        if (!c.isFilesBeingWrittenEmpty()) {
          // found a non-empty file-being-written map
          return;
        }
      }
      // discover the first time that all file-being-written maps are empty.
      emptyTime = Time.monotonicNow();
    }
  }
}
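The emptyTime stamp recorded here is what lets the LeaseRenewer notice it has nothing left to renew and shut itself down; the unit test in Example 13 below drives the same mechanism directly via setEmptyTime.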
Example 5: run
import org.apache.hadoop.util.Time; // import the package/class the method depends on
@Override
public void run() {
  while (shouldRun) {
    scan(streamTimeout);
    // Check whether the monitor can sleep before the next scan
    try {
      long workedTime = Time.monotonicNow() - lastWakeupTime;
      if (workedTime < rotation) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("StreamMonitor can still have a sleep:"
              + ((rotation - workedTime) / 1000));
        }
        Thread.sleep(rotation - workedTime);
      }
      lastWakeupTime = Time.monotonicNow();
    } catch (InterruptedException e) {
      LOG.info("StreamMonitor got interrupted");
      return;
    }
  }
}
Example 6: waitForHeartBeat
import org.apache.hadoop.util.Time; // import the package/class the method depends on
/**
 * Wait until the heartbeat gives the expected results, within
 * CAPACITY_ALLOWED_VARIANCE, summed over all nodes. Times out after
 * TIMEOUT msec.
 * @param expectedUsedSpace
 * @param expectedTotalSpace
 * @throws IOException if getStats() fails
 * @throws TimeoutException
 */
static void waitForHeartBeat(long expectedUsedSpace,
    long expectedTotalSpace, ClientProtocol client, MiniDFSCluster cluster)
    throws IOException, TimeoutException {
  long timeout = TIMEOUT;
  long failtime = (timeout <= 0L) ? Long.MAX_VALUE
      : Time.monotonicNow() + timeout;
  while (true) {
    long[] status = client.getStats();
    double totalSpaceVariance = Math.abs((double) status[0] - expectedTotalSpace)
        / expectedTotalSpace;
    double usedSpaceVariance = Math.abs((double) status[1] - expectedUsedSpace)
        / expectedUsedSpace;
    if (totalSpaceVariance < CAPACITY_ALLOWED_VARIANCE
        && usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
      break; // done
    if (Time.monotonicNow() > failtime) {
      throw new TimeoutException("Cluster failed to reach expected values of "
          + "totalSpace (current: " + status[0]
          + ", expected: " + expectedTotalSpace
          + "), or usedSpace (current: " + status[1]
          + ", expected: " + expectedUsedSpace
          + "), in more than " + timeout + " msec.");
    }
    try {
      Thread.sleep(100L);
    } catch (InterruptedException ignored) {
    }
  }
}
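Example 6's deadline arithmetic (failtime = now + timeout, re-check monotonicNow() on every pass, with timeout <= 0 meaning wait forever) generalizes to any poll-until loop. Below is a hedged sketch of a reusable helper built the same way; the class name and the BooleanSupplier-based signature are our own invention, not Hadoop API:

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;
import org.apache.hadoop.util.Time;

final class MonotonicWait {
  /** Poll until the condition holds, or timeoutMs elapses on the monotonic clock. */
  static void await(BooleanSupplier condition, long timeoutMs, long pollMs)
      throws TimeoutException, InterruptedException {
    long failTime = (timeoutMs <= 0L) ? Long.MAX_VALUE
        : Time.monotonicNow() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (Time.monotonicNow() > failTime) {
        throw new TimeoutException("Condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(pollMs);
    }
  }
}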
Example 7: updateMapIncr
import org.apache.hadoop.util.Time; // import the package/class the method depends on
synchronized private void updateMapIncr(final String name,
    final boolean isGrp) throws IOException {
  if (!checkSupportedPlatform()) {
    return;
  }
  if (isInteger(name) && isGrp) {
    loadFullGroupMap();
    return;
  }
  boolean updated = false;
  updateStaticMapping();
  if (OS.startsWith("Linux")) {
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getName2IdCmdLinux(name, true), ":",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getName2IdCmdLinux(name, false), ":",
          staticMapping.uidMapping);
    }
  } else {
    // Mac
    if (isGrp) {
      updated = updateMapInternal(gidNameMap, "group",
          getName2IdCmdMac(name, true), "\\s+",
          staticMapping.gidMapping);
    } else {
      updated = updateMapInternal(uidNameMap, "user",
          getName2IdCmdMac(name, false), "\\s+",
          staticMapping.uidMapping);
    }
  }
  if (updated) {
    lastUpdateTime = Time.monotonicNow();
  }
}
Example 8: load
import org.apache.hadoop.util.Time; // import the package/class the method depends on
void load(File file) throws IOException {
  long start = Time.monotonicNow();
  imgDigest = MD5FileUtils.computeMd5ForFile(file);
  RandomAccessFile raFile = new RandomAccessFile(file, "r");
  FileInputStream fin = new FileInputStream(file);
  try {
    loadInternal(raFile, fin);
    long end = Time.monotonicNow();
    LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
  } finally {
    fin.close();
    raFile.close();
  }
}
Example 9: waitForAckedSeqno
import org.apache.hadoop.util.Time; // import the package/class the method depends on
private void waitForAckedSeqno(long seqno) throws IOException {
  TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
  try {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
    }
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!isClosed()) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            dataQueue.wait(1000); // when we receive an ack, we notify on
                                  // dataQueue
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException e) {
      // the stream was closed while we waited; nothing more to do here
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
          + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
    }
  } finally {
    scope.close();
  }
}
Example 10: increment
import org.apache.hadoop.util.Time; // import the package/class the method depends on
/**
 * Increment the counter, and reset it if there has been no increment
 * for a certain time period.
 *
 * @return the new count.
 */
synchronized long increment() {
  final long now = Time.monotonicNow();
  if (now - timestamp > countResetTimePeriodMs) {
    count = 0; // reset the counter
  }
  timestamp = now;
  return ++count;
}
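To make the semantics concrete, here is a hypothetical caller: because the counter resets itself after a quiet period of countResetTimePeriodMs, it naturally measures bursts, so a caller can throttle only sustained spikes without ever issuing an explicit reset. Everything below except the increment() logic is invented for illustration:

import org.apache.hadoop.util.Time;

class BurstGate {
  private static final int MAX_BURST = 3;             // illustrative threshold
  private final long countResetTimePeriodMs = 10_000; // reset after 10 s of quiet
  private long count;
  private long timestamp;

  // Same reset-on-idle logic as Example 10.
  private synchronized long increment() {
    final long now = Time.monotonicNow();
    if (now - timestamp > countResetTimePeriodMs) {
      count = 0; // quiet period elapsed: start counting afresh
    }
    timestamp = now;
    return ++count;
  }

  /** True when calls are arriving in a burst and should be throttled. */
  synchronized boolean shouldThrottle() {
    return increment() > MAX_BURST;
  }
}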
Example 11: shouldDefer
import org.apache.hadoop.util.Time; // import the package/class the method depends on
private boolean shouldDefer() {
  /* If revocationTimeMs == 0, this is an immediate uncache request.
   * No clients were anchored at the time we made the request. */
  if (revocationTimeMs == 0) {
    return false;
  }
  /* Let's check if any clients still have this block anchored. */
  boolean anchored =
      !dataset.datanode.getShortCircuitRegistry().
          processBlockMunlockRequest(key);
  if (!anchored) {
    LOG.debug("Uncaching {} now that it is no longer in use " +
        "by any clients.", key);
    return false;
  }
  long delta = revocationTimeMs - Time.monotonicNow();
  if (delta < 0) {
    LOG.warn("Forcibly uncaching {} after {} " +
        "because client(s) {} refused to stop using it.", key,
        DurationFormatUtils.formatDurationHMS(revocationTimeMs),
        dataset.datanode.getShortCircuitRegistry().getClientNames(key));
    return false;
  }
  LOG.info("Replica {} still can't be uncached because some " +
      "clients continue to use it. Will wait for {}", key,
      DurationFormatUtils.formatDurationHMS(delta));
  return true;
}
Example 12: RamDiskReplica
import org.apache.hadoop.util.Time; // import the package/class the method depends on
RamDiskReplica(final String bpid, final long blockId,
    final FsVolumeImpl ramDiskVolume) {
  this.bpid = bpid;
  this.blockId = blockId;
  this.ramDiskVolume = ramDiskVolume;
  lazyPersistVolume = null;
  savedMetaFile = null;
  savedBlockFile = null;
  creationTime = Time.monotonicNow();
  isPersisted = false;
}
Example 13: testThreadName
import org.apache.hadoop.util.Time; // import the package/class the method depends on
@Test
public void testThreadName() throws Exception {
  DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
  long fileId = 789L;
  Assert.assertFalse("Renewer not initially running",
      renewer.isRunning());
  // Pretend to open a file
  renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running",
      renewer.isRunning());
  // Check the thread name is reasonable
  String threadName = renewer.getDaemonName();
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
  // Pretend to close the file
  renewer.closeFile(fileId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.monotonicNow());
  // Should stop the renewer running within a few seconds
  long failTime = Time.monotonicNow() + 5000;
  while (renewer.isRunning() && Time.monotonicNow() < failTime) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}
Example 14: isDelayActive
import org.apache.hadoop.util.Time; // import the package/class the method depends on
synchronized private boolean isDelayActive() {
  if (delayUntil == 0 || Time.monotonicNow() > delayUntil) {
    delayUntil = 0;
    return false;
  }
  return true;
}
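The arming side of this gate is not shown on this page, but it would be a one-liner in the same style; the method name below is assumed, not taken from the source:

// Hypothetical counterpart to isDelayActive(): suppress activity for delayMs.
synchronized private void delayFor(long delayMs) {
  delayUntil = Time.monotonicNow() + delayMs;
}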
Example 15: LoadBalancingKMSClientProvider
import org.apache.hadoop.util.Time; // import the package/class the method depends on
public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
    Configuration conf) {
  this(shuffle(providers), Time.monotonicNow(), conf);
}