This article collects typical usage examples of the Java class org.apache.hadoop.hbase.exceptions.TimeoutIOException. If you are struggling with questions such as: what exactly is the Java TimeoutIOException class for, how do I use TimeoutIOException, or where can I find examples of TimeoutIOException in use, then the curated class code examples below may help.
The TimeoutIOException class belongs to the org.apache.hadoop.hbase.exceptions package. 14 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
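Before the examples, a minimal orientation sketch written for this article (not taken from HBase itself): TimeoutIOException extends IOException and is thrown when a bounded wait exceeds its deadline, which lets callers distinguish "gave up waiting" from other I/O failures. The helper name and polling interval below are illustrative assumptions.
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.function.BooleanSupplier;

import org.apache.hadoop.hbase.exceptions.TimeoutIOException;

public class TimeoutIOExceptionSketch {
  // Hypothetical helper: poll a condition until it holds or the deadline passes.
  public static void awaitOrTimeout(BooleanSupplier condition, long timeoutMillis)
      throws IOException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        throw new TimeoutIOException("Condition not met after " + timeoutMillis + " ms");
      }
      try {
        Thread.sleep(10); // back off briefly before polling again
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  }
}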
Example 1: getSequenceId
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
/**
* Wait for sequence number to be assigned & return the assigned value.
* @param maxWaitForSeqId maximum time to wait in milliseconds for sequenceid
* @return the newly assigned sequence number
* @throws IOException if interrupted, or a TimeoutIOException if the wait exceeds maxWaitForSeqId
*/
public long getSequenceId(final long maxWaitForSeqId) throws IOException {
// TODO: This implementation waiting on a latch is problematic because if a higher level
// determines we should stop or abort, there is no global list of all these blocked WALKeys
// waiting on a sequence id; they can't be cancelled... interrupted. See getNextSequenceId.
//
// UPDATE: I think we can remove the timeout now we are stamping all walkeys with sequenceid,
// even those that have failed (previously we were not... so they would just hang out...).
// St.Ack 20150910
try {
if (maxWaitForSeqId < 0) {
this.seqNumAssignedLatch.await();
} else if (!this.seqNumAssignedLatch.await(maxWaitForSeqId, TimeUnit.MILLISECONDS)) {
throw new TimeoutIOException("Failed to get sequenceid after " + maxWaitForSeqId +
"ms; WAL system stuck or has gone away?");
}
} catch (InterruptedException ie) {
LOG.warn("Thread interrupted waiting for next log sequence number");
InterruptedIOException iie = new InterruptedIOException();
iie.initCause(ie);
throw iie;
}
return this.logSeqNum;
}
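A hedged caller-side sketch for getSequenceId above; the WALKey parameter type and the 30-second budget are illustrative assumptions, not part of the original example.
import java.io.IOException;

import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.wal.WALKey;

public class SequenceIdWaitSketch {
  // Hypothetical caller: the append has been queued, now wait for its sequence id.
  static long waitForAssignedSeqId(WALKey key) throws IOException {
    try {
      return key.getSequenceId(30_000L); // wait at most 30 seconds
    } catch (TimeoutIOException e) {
      // The WAL system is stuck or gone away; rethrow so a higher level can abort
      // instead of hanging forever on the latch.
      throw e;
    }
  }
}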
Example 2: waitFor
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
public static <T> T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents,
String purpose, Predicate<T> predicate) throws IOException {
final long done = EnvironmentEdgeManager.currentTime() + waitTime;
do {
T result = predicate.evaluate();
if (result != null && !result.equals(Boolean.FALSE)) {
return result;
}
try {
Thread.sleep(waitingTimeForEvents);
} catch (InterruptedException e) {
LOG.warn("Interrupted while sleeping, waiting on " + purpose);
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
LOG.debug("Waiting on " + purpose);
} while (EnvironmentEdgeManager.currentTime() < done && env.isRunning());
throw new TimeoutIOException("Timed out while waiting on " + purpose);
}
Example 3: waitFor
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
public static <T> T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents,
String purpose, Predicate<T> predicate) throws IOException {
final long done = EnvironmentEdgeManager.currentTime() + waitTime;
boolean logged = false;
do {
T result = predicate.evaluate();
if (result != null && !result.equals(Boolean.FALSE)) {
return result;
}
try {
Thread.sleep(waitingTimeForEvents);
} catch (InterruptedException e) {
LOG.warn("Interrupted while sleeping, waiting on " + purpose);
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
if (LOG.isTraceEnabled()) {
LOG.trace("waitFor " + purpose);
} else {
if (!logged) LOG.debug("waitFor " + purpose);
}
logged = true;
} while (EnvironmentEdgeManager.currentTime() < done && env.isRunning());
throw new TimeoutIOException("Timed out while waiting on " + purpose);
}
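A hedged usage sketch for waitFor: the predicate is re-evaluated on every poll and should return null (or Boolean.FALSE) until the condition holds. The ProcedureExecutor, procId, and 60-second budget are illustrative assumptions, and the sketch is written as if it sat in the same class as the waitFor method above.
// Hypothetical call site: poll every 100 ms, for at most 60 s, until the procedure finishes.
static void waitForProcedure(MasterProcedureEnv env,
    ProcedureExecutor<MasterProcedureEnv> procExec, long procId) throws IOException {
  waitFor(env, 60_000L, 100L, "completion of pid=" + procId,
    () -> procExec.isFinished(procId) ? Boolean.TRUE : null);
}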
Example 4: block
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
/**
* An ugly utility, to be removed when TableNamespaceManager is refactored.
* @throws TimeoutIOException
*/
private static void block(final MasterServices services, final long procId)
throws TimeoutIOException {
int timeoutInMillis = services.getConfiguration().
getInt(ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY,
ClusterSchema.DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT);
long deadlineTs = EnvironmentEdgeManager.currentTime() + timeoutInMillis;
ProcedureExecutor<MasterProcedureEnv> procedureExecutor =
services.getMasterProcedureExecutor();
while(EnvironmentEdgeManager.currentTime() < deadlineTs) {
if (procedureExecutor.isFinished(procId)) return;
// Sleep some
Threads.sleep(10);
}
throw new TimeoutIOException("Procedure pid=" + procId + " is still running");
}
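A hedged sketch of a call site for block: submitting the procedure first, and the Procedure type used here, are assumptions made for illustration (written as if it sat in the same class as block above).
// Hypothetical call site: submit a procedure, then block until the master has finished it,
// or a TimeoutIOException is thrown once the configured schema-operation timeout expires.
static void submitAndBlock(MasterServices services, Procedure<MasterProcedureEnv> procedure)
    throws TimeoutIOException {
  long procId = services.getMasterProcedureExecutor().submitProcedure(procedure);
  block(services, procId);
}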
Example 5: get
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
synchronized long get(long timeoutNs) throws InterruptedException,
ExecutionException, TimeoutIOException {
final long done = System.nanoTime() + timeoutNs;
while (!isDone()) {
wait(1000);
if (System.nanoTime() >= done) {
throw new TimeoutIOException(
"Failed to get sync result after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
+ " ms for txid=" + this.txid + ", WAL system stuck?");
}
}
if (this.throwable != null) {
throw new ExecutionException(this.throwable);
}
return this.doneTxid;
}
Example 6: blockOnSync
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
protected final void blockOnSync(SyncFuture syncFuture) throws IOException {
// Now we have published the ringbuffer, halt the current thread until we get an answer back.
try {
if (syncFuture != null) {
syncFuture.get(walSyncTimeoutNs);
}
} catch (TimeoutIOException tioe) {
// SyncFutures are reused per handler thread. If a TimeoutIOException happens, the
// ring buffer may still hold a reference to this future, so reusing it on this thread
// could yield a wrong result. Drop it here so this thread gets a fresh future next time.
this.syncFuturesByHandler.remove(Thread.currentThread());
throw tioe;
} catch (InterruptedException ie) {
LOG.warn("Interrupted", ie);
throw convertInterruptedExceptionToIOException(ie);
} catch (ExecutionException e) {
throw ensureIOException(e.getCause());
}
}
Example 7: test
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
@Test
public void test() throws InterruptedException, ExecutionException {
SLEEP_MS = 1000;
long startNs = System.nanoTime();
try {
LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT,
TimeUnit.MILLISECONDS.toNanos(500)).get();
fail();
} catch (ExecutionException e) {
e.printStackTrace();
assertThat(e.getCause(), instanceOf(TimeoutIOException.class));
}
long costMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
assertTrue(costMs >= 500);
assertTrue(costMs < 1000);
// wait for the background task to finish
Thread.sleep(2000);
// Now the location should be in cache, so we will not visit meta again.
HRegionLocation loc = LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW,
RegionLocateType.CURRENT, TimeUnit.MILLISECONDS.toNanos(500)).get();
assertEquals(loc.getServerName(),
TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName());
}
Example 8: withTimeout
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
private CompletableFuture<HRegionLocation> withTimeout(CompletableFuture<HRegionLocation> future,
long timeoutNs, Supplier<String> timeoutMsg) {
if (future.isDone() || timeoutNs <= 0) {
return future;
}
Timeout timeoutTask = retryTimer.newTimeout(t -> {
if (future.isDone()) {
return;
}
future.completeExceptionally(new TimeoutIOException(timeoutMsg.get()));
}, timeoutNs, TimeUnit.NANOSECONDS);
return future.whenComplete((loc, error) -> {
if (error != null && error.getClass() != TimeoutIOException.class) {
// cancel timeout task if we are not completed by it.
timeoutTask.cancel();
}
});
}
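The helper above relies on HBase's internal retry timer; a hedged, standalone sketch of the same idea using only the JDK (the ScheduledExecutorService here is an assumption for illustration, not what HBase uses) could look like this:
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

import org.apache.hadoop.hbase.exceptions.TimeoutIOException;

public class FutureTimeoutSketch {
  private static final ScheduledExecutorService TIMER =
      Executors.newSingleThreadScheduledExecutor();

  // Complete the future exceptionally with TimeoutIOException if it is still pending
  // after timeoutNs; cancel the timer task if the future completes some other way first.
  static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutNs,
      Supplier<String> timeoutMsg) {
    if (future.isDone() || timeoutNs <= 0) {
      return future;
    }
    ScheduledFuture<?> timeoutTask = TIMER.schedule(() -> {
      if (!future.isDone()) {
        future.completeExceptionally(new TimeoutIOException(timeoutMsg.get()));
      }
    }, timeoutNs, TimeUnit.NANOSECONDS);
    return future.whenComplete((result, error) -> {
      if (error == null || error.getClass() != TimeoutIOException.class) {
        timeoutTask.cancel(false);
      }
    });
  }
}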
Example 9: setTimeoutFailure
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
@InterfaceAudience.Private
protected synchronized boolean setTimeoutFailure() {
if (state == ProcedureState.WAITING_TIMEOUT) {
long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
setFailure("ProcedureExecutor", new TimeoutIOException(
"Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
return true;
}
return false;
}
Example 10: testGet
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
@Test(expected = TimeoutIOException.class)
public void testGet() throws Exception {
long timeout = 5000;
long txid = 100000;
SyncFuture syncFuture = new SyncFuture().reset(txid);
syncFuture.done(txid, null);
assertEquals(txid, syncFuture.get(timeout));
syncFuture.reset(txid).get(timeout);
}
Example 11: setTimeoutFailure
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
/**
* Called by the ProcedureExecutor when the timeout set by setTimeout() is expired.
* @return true to let the framework treat the timeout as an abort,
*         false if the procedure has handled the timeout itself.
*/
protected synchronized boolean setTimeoutFailure(final TEnvironment env) {
if (state == ProcedureState.WAITING_TIMEOUT) {
long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
setFailure("ProcedureExecutor", new TimeoutIOException(
"Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
return true;
}
return false;
}
Example 12: assertIsTimeoutException
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
public static void assertIsTimeoutException(final ProcedureInfo result) {
assertEquals(true, result.isFailed());
LOG.info(result.getExceptionFullMessage());
Throwable cause = getExceptionCause(result);
assertTrue("expected TimeoutIOException, got " + cause, cause instanceof TimeoutIOException);
}
Example 13: lockRowsAndBuildMiniBatch
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
/**
* Creates a mini-batch of all operations in [nextIndexToProcess, lastIndexExclusive) for which
* a row lock can be acquired. All mutations with locked rows are considered to be
* in-progress operations, hence the name {@link MiniBatchOperationInProgress}. A mini-batch
* is a window over {@link BatchOperation} and contains contiguous pending operations.
*
* @param acquiredRowLocks keeps track of rowLocks acquired.
*/
public MiniBatchOperationInProgress<Mutation> lockRowsAndBuildMiniBatch(
List<RowLock> acquiredRowLocks) throws IOException {
int readyToWriteCount = 0;
int lastIndexExclusive = 0;
RowLock prevRowLock = null;
for (; lastIndexExclusive < size(); lastIndexExclusive++) {
// Once the batch reaches miniBatchSize, stop here and process the mini-batch.
// This only applies to non-atomic batch operations.
if (!isAtomic() && (readyToWriteCount == region.miniBatchSize)) {
break;
}
if (!isOperationPending(lastIndexExclusive)) {
continue;
}
Mutation mutation = getMutation(lastIndexExclusive);
// If we haven't got any rows in our batch, we should block to get the next one.
RowLock rowLock = null;
try {
// if atomic then get exclusive lock, else shared lock
rowLock = region.getRowLockInternal(mutation.getRow(), !isAtomic(), prevRowLock);
} catch (TimeoutIOException e) {
// We will retry on other exceptions, but we should stop if we time out.
throw e;
} catch (IOException ioe) {
LOG.warn("Failed getting lock, row=" + Bytes.toStringBinary(mutation.getRow()), ioe);
if (isAtomic()) { // fail, atomic means all or none
throw ioe;
}
}
if (rowLock == null) {
// We failed to grab another lock
if (isAtomic()) {
throw new IOException("Can't apply all operations atomically!");
}
break; // Stop acquiring more rows for this batch
} else {
if (rowLock != prevRowLock) {
// It is a different row now, add this to the acquiredRowLocks and
// set prevRowLock to the new returned rowLock
acquiredRowLocks.add(rowLock);
prevRowLock = rowLock;
}
}
readyToWriteCount++;
}
return createMiniBatch(lastIndexExclusive, readyToWriteCount);
}
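The error-handling policy inside the lock-acquisition loop can be distilled into a short hedged sketch; the LockAttempt interface and tryLockRow helper below are hypothetical stand-ins for region.getRowLockInternal and its surrounding try/catch, not HBase API.
import java.io.IOException;

import org.apache.hadoop.hbase.exceptions.TimeoutIOException;

public class RowLockPolicySketch {
  // Hypothetical stand-in for region.getRowLockInternal(row, shared, prevRowLock).
  interface LockAttempt<L> {
    L acquire() throws IOException;
  }

  // Distilled policy from the loop above: a lock-wait timeout aborts the whole batch;
  // any other lock failure is fatal only for atomic batches, otherwise the row is skipped.
  static <L> L tryLockRow(LockAttempt<L> attempt, boolean atomic) throws IOException {
    try {
      return attempt.acquire();
    } catch (TimeoutIOException e) {
      throw e; // timed out waiting for the row lock: stop processing the batch
    } catch (IOException ioe) {
      if (atomic) {
        throw ioe; // atomic batch: all or nothing
      }
      return null; // non-atomic: leave the row unlocked and let the caller skip it
    }
  }
}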
Example 14: assertIsTimeoutException
import org.apache.hadoop.hbase.exceptions.TimeoutIOException; // import the required package/class
public static void assertIsTimeoutException(final Procedure<?> result) {
Throwable cause = assertProcFailed(result);
assertTrue("expected TimeoutIOException, got " + cause, cause instanceof TimeoutIOException);
}