This article collects typical usage examples of the Java method org.apache.hadoop.util.Time.now. If you have been wondering what Time.now does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.Time.
The 15 code examples of Time.now shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
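Before the examples, note that Time.now() returns the current wall-clock time in milliseconds; in Hadoop it is a thin wrapper around System.currentTimeMillis(), so it can jump if the system clock is adjusted (the same class provides Time.monotonicNow() for interval measurement). Most of the examples below use it for the elapsed-time idiom, sketched minimally here; the doWork method is a hypothetical placeholder, not part of Hadoop:

import org.apache.hadoop.util.Time;

public class ElapsedTimeSketch {
  public static void main(String[] args) throws Exception {
    long start = Time.now();              // wall-clock milliseconds before the work
    doWork();                             // hypothetical workload
    long elapsedMs = Time.now() - start;  // duration, subject to clock adjustments
    System.out.println("doWork took " + elapsedMs + " ms");
  }

  private static void doWork() throws InterruptedException {
    Thread.sleep(100); // stand-in for real work
  }
}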
Example 1: save
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@Override
public void save() throws IOException {
  state.lastSavedMs = Time.now();
  boolean success = false;
  ObjectMapper mapper = new ObjectMapper();
  try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
      new FileOutputStream(getTempSaveFile(), false), "UTF-8"))) {
    mapper.writerWithDefaultPrettyPrinter().writeValue(writer, state);
    success = true;
  } finally {
    if (!success) {
      if (getTempSaveFile().delete()) {
        LOG.debug("save({}, {}): error deleting temporary file.",
            storageID, bpid);
      }
    }
  }
  Files.move(getTempSaveFile().toPath(), getSaveFile().toPath(),
      StandardCopyOption.ATOMIC_MOVE);
  if (LOG.isTraceEnabled()) {
    LOG.trace("save({}, {}): saved {}", storageID, bpid,
        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(state));
  }
}
Example 2: testRepeatingThread
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@Test
public void testRepeatingThread() throws Exception {
  final AtomicInteger counter = new AtomicInteger();

  TestContext ctx = new TestContext();
  ctx.addThread(new RepeatingTestThread(ctx) {
    @Override
    public void doAnAction() throws Exception {
      counter.incrementAndGet();
    }
  });
  ctx.startThreads();
  long st = Time.now();
  ctx.waitFor(3000);
  ctx.stop();
  long et = Time.now();
  long elapsed = et - st;
  // Test should have waited just about 3 seconds
  assertTrue("Test took " + (et - st) + "ms",
      Math.abs(elapsed - 3000) < 500);
  // Counter should have been incremented lots of times in 3 full seconds
  assertTrue("Counter value = " + counter.get(),
      counter.get() > 1000);
}
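The pattern in this test, capturing Time.now() before and after an action and asserting the difference falls within a tolerance, recurs throughout these examples. A small helper that factors it out; the Timing class and timeMs name are illustrative, not Hadoop API:

import org.apache.hadoop.util.Time;

final class Timing {
  /** Runs the action and returns its wall-clock duration in milliseconds. */
  static long timeMs(Runnable action) {
    long start = Time.now();
    action.run();
    return Time.now() - start;
  }
}

// Example use, mirroring the assertion style of the test above
// (busyLoop is a hypothetical action):
// long elapsed = Timing.timeMs(() -> busyLoop());
// assertTrue("took " + elapsed + "ms", Math.abs(elapsed - 3000) < 500);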
Example 3: testRandomExpirations
import org.apache.hadoop.util.Time; // import the package/class this method depends on

/**
 * Randomly expire the ZK sessions of the two ZKFCs. This differs
 * from the above test in that it is not a controlled failover -
 * we just do random expirations and expect neither one to ever
 * generate fatal exceptions.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomExpirations() throws Exception {
  cluster.start();
  long st = Time.now();
  long runFor = STRESS_RUNTIME_SECS * 1000;

  Random r = new Random();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    int targetIdx = r.nextInt(2);
    ActiveStandbyElector target = cluster.getElector(targetIdx);
    long sessId = target.getZKSessionIdForTests();
    if (sessId != -1) {
      LOG.info(String.format("Expiring session %x for svc %d",
          sessId, targetIdx));
      getServer(serverFactory).closeSession(sessId);
    }
    Thread.sleep(r.nextInt(300));
  }
}
Example 4: destroy
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@Override
public void destroy() {
  try {
    long limit = Time.now() + 30 * 1000;
    scheduler.shutdownNow();
    while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
      LOG.debug("Waiting for scheduler to shutdown");
      if (Time.now() > limit) {
        LOG.warn("Gave up waiting for scheduler to shutdown");
        break;
      }
    }
    if (scheduler.isTerminated()) {
      LOG.debug("Scheduler shutdown");
    }
  } catch (InterruptedException ex) {
    LOG.warn(ex.getMessage(), ex);
  }
}
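destroy() above shows the deadline idiom: compute an absolute limit once (Time.now() + 30 * 1000), then compare Time.now() against it inside a bounded wait loop. A generic sketch of the same idiom, assuming a BooleanSupplier condition; the class and method names are illustrative, not part of Hadoop:

import java.util.function.BooleanSupplier;
import org.apache.hadoop.util.Time;

final class DeadlineWait {
  /** Polls until the condition holds or the deadline passes; true if it held in time. */
  static boolean waitFor(BooleanSupplier condition, long timeoutMs, long pollMs)
      throws InterruptedException {
    long limit = Time.now() + timeoutMs;  // absolute deadline, computed once
    while (!condition.getAsBoolean()) {
      if (Time.now() > limit) {
        return false;                     // gave up, like the WARN branch above
      }
      Thread.sleep(pollMs);
    }
    return true;
  }
}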
Example 5: doIO
import org.apache.hadoop.util.Time; // import the package/class this method depends on

private void doIO(InputStream in, OutputStream out,
    int expectedTimeout) throws IOException {
  /* Keep on writing or reading until we get SocketTimeoutException.
   * It expects this exception to occur within 100 millis of TIMEOUT.
   */
  byte buf[] = new byte[PAGE_SIZE + 19];

  while (true) {
    long start = Time.now();
    try {
      if (in != null) {
        in.read(buf);
      } else {
        out.write(buf);
      }
    } catch (SocketTimeoutException e) {
      long diff = Time.now() - start;
      LOG.info("Got SocketTimeoutException as expected after " +
          diff + " millis : " + e.getMessage());
      assertTrue(Math.abs(expectedTimeout - diff) <=
          TestNetUtils.TIME_FUDGE_MILLIS);
      break;
    }
  }
}
Example 6: testGetLocalHostIsFast
import org.apache.hadoop.util.Time; // import the package/class this method depends on

/**
 * Test that repeated calls to getting the local host are fairly fast, and
 * hence that caching is being used
 * @throws Exception if hostname lookups fail
 */
@Test
public void testGetLocalHostIsFast() throws Exception {
  String hostname1 = DNS.getDefaultHost(DEFAULT);
  assertNotNull(hostname1);
  String hostname2 = DNS.getDefaultHost(DEFAULT);
  long t1 = Time.now();
  String hostname3 = DNS.getDefaultHost(DEFAULT);
  long t2 = Time.now();
  assertEquals(hostname3, hostname2);
  assertEquals(hostname2, hostname1);
  long interval = t2 - t1;
  assertTrue(
      "Took too long to determine local host - caching is not working",
      interval < 20000);
}
Example 7: snapshotMetrics
import org.apache.hadoop.util.Time; // import the package/class this method depends on

private void snapshotMetrics(MetricsSourceAdapter sa,
    MetricsBufferBuilder bufferBuilder) {
  long startTime = Time.now();
  bufferBuilder.add(sa.name(), sa.getMetrics(collector, true));
  collector.clear();
  // record how long snapshotting this source took
  snapshotStat.add(Time.now() - startTime);
  LOG.debug("Snapshotted source " + sa.name());
}
Example 8: run
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@Override
public void run() {
  LOG.info("Starting expired delegation token remover thread, "
      + "tokenRemoverScanInterval=" + tokenRemoverScanInterval
      / (60 * 1000) + " min(s)");
  try {
    while (running) {
      long now = Time.now();
      if (lastMasterKeyUpdate + keyUpdateInterval < now) {
        try {
          rollMasterKey();
          lastMasterKeyUpdate = now;
        } catch (IOException e) {
          LOG.error("Master key updating failed: ", e);
        }
      }
      if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {
        removeExpiredToken();
        lastTokenCacheCleanup = now;
      }
      try {
        Thread.sleep(Math.min(5000, keyUpdateInterval)); // 5 seconds
      } catch (InterruptedException ie) {
        LOG.error("ExpiredTokenRemover received " + ie);
      }
    }
  } catch (Throwable t) {
    LOG.error("ExpiredTokenRemover thread received unexpected exception", t);
    Runtime.getRuntime().exit(-1);
  }
}
Example 9: BlockIteratorState
import org.apache.hadoop.util.Time; // import the package/class this method depends on

BlockIteratorState() {
  // a fresh iterator: both timestamps start at the current wall-clock time
  lastSavedMs = iterStartMs = Time.now();
  curFinalizedDir = null;
  curFinalizedSubDir = null;
  curEntry = null;
  atEnd = false;
}
Example 10: setTimeForSynchronousBPOSCalls
import org.apache.hadoop.util.Time; // import the package/class this method depends on

private void setTimeForSynchronousBPOSCalls() {
  // record the timestamp of the first call, then of the second call
  if (firstCallTime == 0) {
    firstCallTime = Time.now();
  } else {
    secondCallTime = Time.now();
  }
}
Example 11: generateDataEncryptionKey
import org.apache.hadoop.util.Time; // import the package/class this method depends on

/**
 * Generate a data encryption key for this block pool, using the current
 * BlockKey.
 *
 * @return a data encryption key which may be used to encrypt traffic
 *     over the DataTransferProtocol
 */
public DataEncryptionKey generateDataEncryptionKey() {
  byte[] nonce = new byte[8];
  nonceGenerator.nextBytes(nonce);
  BlockKey key = null;
  synchronized (this) {
    key = currentKey;
  }
  byte[] encryptionKey = createPassword(nonce, key.getKey());
  return new DataEncryptionKey(key.getKeyId(), blockPoolId, nonce,
      encryptionKey, Time.now() + tokenLifetime,
      encryptionAlgorithm);
}
Example 12: waitForConnected
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@VisibleForTesting
public synchronized void waitForConnected(long timeout)
    throws InterruptedException, TimeoutException {
  long expire = Time.now() + timeout;
  long left = timeout;
  while (!connected && left > 0) {
    wait(left);
    left = expire - Time.now();
  }
  if (!connected) {
    throw new TimeoutException("Did not connect");
  }
}
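The loop above recomputes the remaining budget (left = expire - Time.now()) after every wait() call, which keeps the overall timeout accurate even when wait() returns early due to a notification or a spurious wakeup. A condensed restatement of the same guarded-wait shape with the key steps commented; the flag field is illustrative:

synchronized void awaitFlag(long timeoutMs)
    throws InterruptedException, TimeoutException {
  long expire = Time.now() + timeoutMs;  // absolute deadline
  long left = timeoutMs;                 // remaining budget
  while (!flag && left > 0) {            // re-check the condition after every wakeup
    wait(left);                          // may return early or spuriously
    left = expire - Time.now();          // shrink the budget accordingly
  }
  if (!flag) {
    throw new TimeoutException("Condition not met within " + timeoutMs + " ms");
  }
}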
Example 13: checkTGTAndReloginFromKeytab
import org.apache.hadoop.util.Time; // import the package/class this method depends on

/**
 * Re-login a user from keytab if TGT is expired or is close to expiry.
 *
 * @throws IOException
 */
public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
  if (!isSecurityEnabled()
      || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
      || !isKeytab) {
    return;
  }
  KerberosTicket tgt = getTGT();
  if (tgt != null && !shouldRenewImmediatelyForTests &&
      Time.now() < getRefreshTime(tgt)) {
    return;
  }
  reloginFromKeytab();
}
Example 14: testMultipleTokensDoNotDeadlock
import org.apache.hadoop.util.Time; // import the package/class this method depends on

@Test(timeout=4000)
public void testMultipleTokensDoNotDeadlock() throws IOException,
    InterruptedException {
  Configuration conf = mock(Configuration.class);
  FileSystem fs = mock(FileSystem.class);
  doReturn(conf).when(fs).getConf();

  long distantFuture = Time.now() + 3600 * 1000; // 1h

  Token<?> token1 = mock(Token.class);
  doReturn(new Text("myservice1")).when(token1).getService();
  doReturn(distantFuture).when(token1).renew(eq(conf));

  Token<?> token2 = mock(Token.class);
  doReturn(new Text("myservice2")).when(token2).getService();
  doReturn(distantFuture).when(token2).renew(eq(conf));

  RenewableFileSystem fs1 = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs1).getConf();
  doReturn(token1).when(fs1).getRenewToken();

  RenewableFileSystem fs2 = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs2).getConf();
  doReturn(token2).when(fs2).getRenewToken();

  renewer.addRenewAction(fs1);
  renewer.addRenewAction(fs2);
  assertEquals(2, renewer.getRenewQueueLength());

  renewer.removeRenewAction(fs1);
  assertEquals(1, renewer.getRenewQueueLength());
  renewer.removeRenewAction(fs2);
  assertEquals(0, renewer.getRenewQueueLength());

  verify(token1).cancel(eq(conf));
  verify(token2).cancel(eq(conf));
}
Example 15: setupCluster
import org.apache.hadoop.util.Time; // import the package/class this method depends on

public static void setupCluster(int replicationFactor, HdfsConfiguration conf)
    throws Exception {
  util = new BlockReaderTestUtil(replicationFactor, conf);
  dfsClient = util.getDFSClient();
  long seed = Time.now();
  LOG.info("Random seed: " + seed);
  rand = new Random(seed);
}
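Seeding a Random with Time.now() and logging the seed, as setupCluster does, makes a randomized test reproducible: when it fails, you can re-run it with the logged seed. One common refinement, sketched here (the test.seed property name is an assumption, not a Hadoop convention), is to allow overriding the seed from the command line:

long seed = Long.getLong("test.seed", Time.now());  // pass -Dtest.seed=... to reproduce a failure
LOG.info("Random seed: " + seed);
Random rand = new Random(seed);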