This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.RetryCounter.sleepUntilNextRetry. If you are unsure what RetryCounter.sleepUntilNextRetry does, how to call it, or what it looks like in practice, the selected examples below may help. You can also look further into usage examples of the enclosing class, org.apache.hadoop.hbase.util.RetryCounter.
The sections below present 15 code examples of the RetryCounter.sleepUntilNextRetry method, sorted by popularity by default.
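All 15 examples share the same shape: obtain a RetryCounter (directly or from a RetryCounterFactory), attempt an operation, and call sleepUntilNextRetry() to back off before trying again. The following minimal sketch only illustrates that shape and is not taken from HBase itself: the factory settings (5 attempts, 1000 ms) and the doFetch() helper are made-up placeholders, and the sketch assumes a RetryCounter version in which sleepUntilNextRetry() advances the attempt count (as in Example 1 below); in older versions the caller invokes useRetry() explicitly after sleeping (as in Examples 3 and 4).

import java.io.IOException;

import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

public class SleepUntilNextRetrySketch {

  // Illustrative settings: at most 5 attempts, 1000 ms between attempts.
  private final RetryCounterFactory retryCounterFactory = new RetryCounterFactory(5, 1000);

  /** Retries a hypothetical I/O operation, backing off with sleepUntilNextRetry(). */
  public String fetchWithRetries() throws IOException, InterruptedException {
    RetryCounter retryCounter = retryCounterFactory.create();
    IOException lastFailure = null;
    while (retryCounter.shouldRetry()) {
      try {
        return doFetch();
      } catch (IOException e) {
        lastFailure = e; // remember the failure and fall through to the back-off
      }
      // Block for the configured interval before the next attempt.
      retryCounter.sleepUntilNextRetry();
    }
    // All attempts used up; surface the last failure.
    throw lastFailure != null ? lastFailure : new IOException("no attempt was made");
  }

  // Hypothetical stand-in for the real remote call being retried.
  private String doFetch() throws IOException {
    return "ok";
  }
}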
Example 1: getRSForFirstRegionInTable
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * Tool to get the reference to the region server object that holds the
 * region of the specified user table.
 * It first searches for the meta rows that contain the region of the
 * specified table, then gets the index of that RS, and finally retrieves
 * the RS's reference.
 * @param tableName user table to lookup in hbase:meta
 * @return region server that holds it, null if the row doesn't exist
 * @throws IOException
 * @throws InterruptedException
 */
public HRegionServer getRSForFirstRegionInTable(TableName tableName)
    throws IOException, InterruptedException {
  List<byte[]> metaRows = getMetaTableRows(tableName);
  if (metaRows == null || metaRows.isEmpty()) {
    return null;
  }
  LOG.debug("Found " + metaRows.size() + " rows for table " + tableName);
  byte[] firstrow = metaRows.get(0);
  LOG.debug("FirstRow=" + Bytes.toString(firstrow));
  long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
  while (retrier.shouldRetry()) {
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    if (index != -1) {
      return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
    }
    // Came back -1. Region may not be online yet. Sleep a while.
    retrier.sleepUntilNextRetry();
  }
  return null;
}
Example 2: execWithRetries
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

private Pair<Integer, String> execWithRetries(String hostname, ServiceType service, String... cmd)
    throws IOException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return exec(hostname, service, cmd);
    } catch (IOException e) {
      retryOrThrow(retryCounter, e, hostname, cmd);
    }
    try {
      retryCounter.sleepUntilNextRetry();
    } catch (InterruptedException ex) {
      // ignore
      LOG.warn("Sleep Interrupted:" + ex);
    }
  }
}
Example 3: getChildren
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * getChildren is an idempotent operation. Retry before throwing exception
 * @return List of children znodes
 */
public List<String> getChildren(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return zk.getChildren(path, watcher);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "getChildren");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 4: getData
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * getData is an idempotent operation. Retry before throwing exception
 * @return Data
 */
public byte[] getData(String path, Watcher watcher, Stat stat)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      byte[] revData = zk.getData(path, watcher, stat);
      return this.removeMetaData(revData);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "getData");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 5: getRSForFirstRegionInTable
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * Tool to get the reference to the region server object that holds the
 * region of the specified user table.
 * It first searches for the meta rows that contain the region of the
 * specified table, then gets the index of that RS, and finally retrieves
 * the RS's reference.
 * @param tableName user table to lookup in hbase:meta
 * @return region server that holds it, null if the row doesn't exist
 * @throws IOException
 */
public HRegionServer getRSForFirstRegionInTable(TableName tableName)
    throws IOException, InterruptedException {
  List<byte[]> metaRows = getMetaTableRows(tableName);
  if (metaRows == null || metaRows.isEmpty()) {
    return null;
  }
  LOG.debug("Found " + metaRows.size() + " rows for table " + tableName);
  byte[] firstrow = metaRows.get(0);
  LOG.debug("FirstRow=" + Bytes.toString(firstrow));
  long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
  while (retrier.shouldRetry()) {
    int index = getMiniHBaseCluster().getServerWith(firstrow);
    if (index != -1) {
      return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
    }
    // Came back -1. Region may not be online yet. Sleep a while.
    retrier.sleepUntilNextRetry();
  }
  return null;
}
Example 6: multi
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * Run multiple operations in a transactional manner. Retry before throwing exception
 */
public List<OpResult> multi(Iterable<Op> ops)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  Iterable<Op> multiOps = prepareZKMulti(ops);
  while (true) {
    try {
      return zk.multi(multiOps);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "multi");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 7: execWithRetries
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

private Pair<Integer, String> execWithRetries(String hostname, String... cmd)
    throws IOException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return exec(hostname, cmd);
    } catch (IOException e) {
      retryOrThrow(retryCounter, e, hostname, cmd);
    }
    try {
      retryCounter.sleepUntilNextRetry();
    } catch (InterruptedException ex) {
      // ignore
      LOG.warn("Sleep Interrupted:" + ex);
    }
  }
}
Example 8: exists
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
  try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
    RetryCounter retryCounter = retryCounterFactory.create();
    while (true) {
      try {
        long startTime = EnvironmentEdgeManager.currentTime();
        Stat nodeStat = checkZk().exists(path, watcher);
        return nodeStat;
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
            retryOrThrow(retryCounter, e, "exists");
            break;
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "exists");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
    }
  }
}
Example 9: exists
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, boolean watch)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return zk.exists(path, watch);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "exists");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 10: getData
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * getData is an idempotent operation. Retry before throwing exception
 * @return Data
 */
public byte[] getData(String path, boolean watch, Stat stat)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      byte[] revData = zk.getData(path, watch, stat);
      return this.removeMetaData(revData);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "getData");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 11: exists
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return zk.exists(path, watcher);
    } catch (KeeperException e) {
      switch (e.code()) {
        case CONNECTIONLOSS:
        case SESSIONEXPIRED:
        case OPERATIONTIMEOUT:
          retryOrThrow(retryCounter, e, "exists");
          break;
        default:
          throw e;
      }
    }
    retryCounter.sleepUntilNextRetry();
    retryCounter.useRetry();
  }
}
Example 12: isServerReachable
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * Check if a region server is reachable and has the expected start code
 */
public boolean isServerReachable(ServerName server) {
  if (server == null) throw new NullPointerException("Passed server is null");
  RetryCounter retryCounter = pingRetryCounterFactory.create();
  while (retryCounter.shouldRetry()) {
    synchronized (this.onlineServers) {
      if (this.deadservers.isDeadServer(server)) {
        return false;
      }
    }
    try {
      PayloadCarryingRpcController controller = newRpcController();
      AdminService.BlockingInterface admin = getRsAdmin(server);
      if (admin != null) {
        ServerInfo info = ProtobufUtil.getServerInfo(controller, admin);
        return info != null && info.hasServerName()
            && server.getStartcode() == info.getServerName().getStartCode();
      }
    } catch (IOException ioe) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes() + " of "
            + retryCounter.getMaxAttempts(), ioe);
      }
      try {
        retryCounter.sleepUntilNextRetry();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        break;
      }
    }
  }
  return false;
}
Example 13: delete
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * delete is an idempotent operation. Retry before throwing exception.
 * This function will not throw NoNodeException if the path does not
 * exist.
 */
public void delete(String path, int version)
    throws InterruptedException, KeeperException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.delete");
    RetryCounter retryCounter = retryCounterFactory.create();
    boolean isRetry = false; // False for first attempt, true for all retries.
    while (true) {
      try {
        checkZk().delete(path, version);
        return;
      } catch (KeeperException e) {
        switch (e.code()) {
          case NONODE:
            if (isRetry) {
              LOG.debug("Node " + path + " already deleted. Assuming a " +
                  "previous attempt succeeded.");
              return;
            }
            LOG.debug("Node " + path + " already deleted, retry=" + isRetry);
            throw e;
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "delete");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
      isRetry = true;
    }
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
Example 14: exists
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.exists");
    RetryCounter retryCounter = retryCounterFactory.create();
    while (true) {
      try {
        return checkZk().exists(path, watcher);
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "exists");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
    }
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
Example 15: getChildren
import org.apache.hadoop.hbase.util.RetryCounter; // import the package/class the method depends on

/**
 * getChildren is an idempotent operation. Retry before throwing exception
 * @return List of children znodes
 */
public List<String> getChildren(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
    RetryCounter retryCounter = retryCounterFactory.create();
    while (true) {
      try {
        return checkZk().getChildren(path, watcher);
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "getChildren");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
    }
  } finally {
    if (traceScope != null) traceScope.close();
  }
}