This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException. If you are wondering what RetriesExhaustedWithDetailsException is for and how to use it, the curated code examples below should help.
The RetriesExhaustedWithDetailsException class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are shown below, sorted by popularity by default.
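Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them share: submit mutations, catch RetriesExhaustedWithDetailsException, and inspect its per-action causes. It is not taken from any of the projects below; the table name "demo", the column family/qualifier, and the row contents are placeholders.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriesExhaustedSketch {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("demo"))) { // placeholder table
      List<Put> puts = new ArrayList<>();
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      puts.add(put);
      table.put(puts); // may throw RetriesExhaustedWithDetailsException
    } catch (RetriesExhaustedWithDetailsException e) {
      // The exception aggregates one (cause, row, server) triple per failed action.
      for (int i = 0; i < e.getNumExceptions(); i++) {
        System.err.println("row=" + Bytes.toString(e.getRow(i).getRow())
            + " server=" + e.getHostnamePort(i)
            + " cause=" + e.getCause(i));
      }
      // Or log the aggregated human-readable summary:
      System.err.println(e.getExhaustiveDescription());
    }
  }
}

The per-index accessors (getNumExceptions(), getRow(i), getHostnamePort(i), getCause(i)) and getExhaustiveDescription() are exactly the calls the examples below rely on.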
Example 1: doPuts

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private int doPuts(int maxOps, final HTable... tables) throws Exception {
  int count = 0;
  try {
    while (count < maxOps) {
      Put put = new Put(Bytes.toBytes("row-" + count));
      put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
      for (final HTable table : tables) {
        table.put(put);
      }
      count += tables.length;
    }
  } catch (RetriesExhaustedWithDetailsException e) {
    for (Throwable t : e.getCauses()) {
      if (!(t instanceof ThrottlingException)) {
        throw e;
      }
    }
    LOG.error("put failed after nRetries=" + count, e);
  }
  return count;
}
Example 2: recordFailure

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private void recordFailure(final Mutation m, final long keyBase,
    final long start, IOException e) {
  failedKeySet.add(keyBase);
  String exceptionInfo;
  if (e instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
    exceptionInfo = aggEx.getExhaustiveDescription();
  } else {
    StringWriter stackWriter = new StringWriter();
    PrintWriter pw = new PrintWriter(stackWriter);
    e.printStackTrace(pw);
    pw.flush();
    exceptionInfo = StringUtils.stringifyException(e);
  }
  LOG.error("Failed to mutate: " + keyBase + " after " + (System.currentTimeMillis() - start)
      + "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: "
      + exceptionInfo);
}
Example 3: recordFailure

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private void recordFailure(final Table table, final Put put, final long keyBase,
    final long start, IOException e) {
  failedKeySet.add(keyBase);
  String exceptionInfo;
  if (e instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
    exceptionInfo = aggEx.getExhaustiveDescription();
  } else {
    StringWriter stackWriter = new StringWriter();
    PrintWriter pw = new PrintWriter(stackWriter);
    e.printStackTrace(pw);
    pw.flush();
    exceptionInfo = StringUtils.stringifyException(e);
  }
  LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
      + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
      + exceptionInfo);
}
Example 4: processEvent

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
@Override
public Put processEvent(Event event)
    throws EventProcessingException, RetriesExhaustedWithDetailsException, InterruptedIOException {
  Set<Entry<String, String>> kvp = event.getHeaders().entrySet();
  // Iterator<Entry<String, String>> itr = kvp.iterator();
  rowKey = getRowKey(constructDefaultRowKey(event.getHeaders(), 0), event.getHeaders());
  Put putReq = new Put(rowKey);
  // while (itr.hasNext()) {
  //   Entry<String, String> entry = itr.next();
  //   if (entry.getValue() != null) {
  //
  //   }
  // }
  putReq.add(MarauderParserConstants.MARAUDER_CF_HEADERS, "v".getBytes(),
      event.getHeaders().toString().getBytes());
  // if (event.getBody() != null) {
  //   putReq.add(MarauderParserConstants.MARAUDER_CF_MESSAGE,
  //       MarauderParserConstants.MARAUDER_KEY_MESSAGE,
  //       compressEventBody(event.getBody()));
  // }
  performExtendedProcessing(putReq, event);
  return putReq;
}
Example 5: doMutation

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private void doMutation(final Mutation mutation) throws RetriesExhaustedWithDetailsException {
  Long sequenceId = null;
  try {
    // registerOperationWithHeapSize() waits until both the memory and rpc count maximum
    // requirements are achieved.
    sequenceId = sizeManager.registerOperationWithHeapSize(mutation.heapSize());
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  }
  AccountingFutureCallback callback = new AccountingFutureCallback(mutation, sequenceId);
  // TODO: Consider adding the callback in another executor for the blocking call. There are some
  // concerns running this callback on the rpc threadpool. This callback requires locks on
  // resources. If they cannot be acquired, the callback would block future completions.
  Futures.addCallback(batchExecutor.issueRequest(mutation), callback);
}
Example 6: insert

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
public void insert(Table table, Put put, long keyBase) {
  long start = System.currentTimeMillis();
  try {
    put = (Put) dataGenerator.beforeMutate(keyBase, put);
    table.put(put);
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    failedKeySet.add(keyBase);
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
      exceptionInfo = aggEx.getExhaustiveDescription();
    } else {
      StringWriter stackWriter = new StringWriter();
      PrintWriter pw = new PrintWriter(stackWriter);
      e.printStackTrace(pw);
      pw.flush();
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
        + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow())
        + "; errors: " + exceptionInfo);
  }
}
Example 7: insert

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
public void insert(HTable table, Put put, long keyBase) {
  long start = System.currentTimeMillis();
  try {
    put = (Put) dataGenerator.beforeMutate(keyBase, put);
    table.put(put);
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    failedKeySet.add(keyBase);
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
      exceptionInfo = aggEx.getExhaustiveDescription();
    } else {
      StringWriter stackWriter = new StringWriter();
      PrintWriter pw = new PrintWriter(stackWriter);
      e.printStackTrace(pw);
      pw.flush();
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
        + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow())
        + "; errors: " + exceptionInfo);
  }
}
Example 8: recordFailure

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private void recordFailure(final HTable table, final Put put, final long keyBase,
    final long start, IOException e) {
  failedKeySet.add(keyBase);
  String exceptionInfo;
  if (e instanceof RetriesExhaustedWithDetailsException) {
    RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
    exceptionInfo = aggEx.getExhaustiveDescription();
  } else {
    StringWriter stackWriter = new StringWriter();
    PrintWriter pw = new PrintWriter(stackWriter);
    e.printStackTrace(pw);
    pw.flush();
    exceptionInfo = StringUtils.stringifyException(e);
  }
  LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start)
      + "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
      + exceptionInfo);
}
Example 9: handleHBaseException

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
public static void handleHBaseException(
    RetriesExhaustedWithDetailsException rex,
    Record record,
    Map<String, Record> rowKeyToRecord,
    ErrorRecordHandler errorRecordHandler
) throws StageException {
  for (int i = 0; i < rex.getNumExceptions(); i++) {
    if (rex.getCause(i) instanceof NoSuchColumnFamilyException) {
      Row r = rex.getRow(i);
      Record errorRecord = record != null ? record : rowKeyToRecord.get(Bytes.toString(r.getRow()));
      OnRecordErrorException exception =
          new OnRecordErrorException(errorRecord, Errors.HBASE_10,
              getErrorDescription(rex.getCause(i), r, rex.getHostnamePort(i)));
      errorRecordHandler.onError(exception);
    } else {
      // If at least one cause is not a NoSuchColumnFamilyException,
      // treat the whole failure as a stage exception.
      throw new StageException(Errors.HBASE_02, rex);
    }
  }
}
Example 10: batch

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
/**
 * Apply the changes and handle the table pool.
 * @param tableName table to insert into
 * @param allRows list of actions
 * @throws IOException
 */
protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
  if (allRows.isEmpty()) {
    return;
  }
  Table table = null;
  try {
    Connection connection = getConnection();
    table = connection.getTable(tableName);
    for (List<Row> rows : allRows) {
      table.batch(rows, null);
    }
  } catch (RetriesExhaustedWithDetailsException rewde) {
    for (Throwable ex : rewde.getCauses()) {
      if (ex instanceof TableNotFoundException) {
        throw new TableNotFoundException("'" + tableName + "'");
      }
    }
  } catch (InterruptedException ix) {
    throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
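One caveat about Example 10: if none of the causes is a TableNotFoundException, the catch block falls through and the failed batch is silently ignored. A hedged variant of that error handling (the class name BatchErrorPolicy is hypothetical, not part of the project above) that propagates unexpected causes instead could look like this:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;

public final class BatchErrorPolicy {
  private BatchErrorPolicy() {}

  /** Surface a TableNotFoundException if one caused the failure; otherwise rethrow. */
  static void rethrow(TableName tableName, RetriesExhaustedWithDetailsException rewde)
      throws IOException {
    for (Throwable ex : rewde.getCauses()) {
      if (ex instanceof TableNotFoundException) {
        throw new TableNotFoundException("'" + tableName + "'");
      }
    }
    throw rewde; // unlike the example above, do not swallow unknown causes
  }
}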
Example 11: insert

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
public void insert(HTable table, Put put, long keyBase) {
  long start = System.currentTimeMillis();
  try {
    table.put(put);
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    failedKeySet.add(keyBase);
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
      exceptionInfo = aggEx.getExhaustiveDescription();
    } else {
      StringWriter stackWriter = new StringWriter();
      PrintWriter pw = new PrintWriter(stackWriter);
      e.printStackTrace(pw);
      pw.flush();
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to insert: " + keyBase + " after " + (System.currentTimeMillis() - start) +
        "ms; region information: " + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: "
        + exceptionInfo);
  }
}
Example 12: testConstraintFails

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
/**
 * Test that constraints will fail properly
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test(timeout = 60000)
public void testConstraintFails() throws Exception {
  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // add a constraint that is sure to fail
  Constraints.add(desc, AllFailConstraint.class);
  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);
  // test that we do fail on violation
  Put put = new Put(row1);
  put.add(dummy, new byte[0], "fail".getBytes());
  LOG.warn("Doing put in table");
  try {
    table.put(put);
    fail("This put should not have succeeded - AllFailConstraint was not run!");
  } catch (RetriesExhaustedWithDetailsException e) {
    List<Throwable> causes = e.getCauses();
    assertEquals(
        "More than one failure cause - should only be the failure constraint exception",
        1, causes.size());
    Throwable t = causes.get(0);
    assertEquals(ConstraintException.class, t.getClass());
  }
  table.close();
}
Example 13: mutate

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
public void mutate(Table table, Mutation m,
    long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
  long start = System.currentTimeMillis();
  try {
    m = dataGenerator.beforeMutate(keyBase, m);
    if (m instanceof Increment) {
      table.increment((Increment) m);
    } else if (m instanceof Append) {
      table.append((Append) m);
    } else if (m instanceof Put) {
      table.checkAndPut(row, cf, q, v, (Put) m);
    } else if (m instanceof Delete) {
      table.checkAndDelete(row, cf, q, v, (Delete) m);
    } else {
      throw new IllegalArgumentException(
          "unsupported mutation " + m.getClass().getSimpleName());
    }
    totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
  } catch (IOException e) {
    if (ignoreNonceConflicts && (e instanceof OperationConflictException)) {
      LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
      totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
      return;
    }
    failedKeySet.add(keyBase);
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e;
      exceptionInfo = aggEx.getExhaustiveDescription();
    } else {
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to mutate: " + keyBase + " after " +
        (System.currentTimeMillis() - start) +
        "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: "
        + exceptionInfo);
  }
}
Example 14: getTable

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
private HTable getTable() throws IOException {
  HTable table = tables.get();
  if (table == null) {
    table = new HTable(conf, tableName) {
      @Override
      public synchronized void flushCommits() throws RetriesExhaustedWithDetailsException, InterruptedIOException {
        super.flushCommits();
      }
    };
    table.setAutoFlushTo(autoFlush);
    pool.add(table); // keep track
    tables.set(table);
  }
  return table;
}
Example 15: processEvent

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; // import the required package/class
@Override
public Put processEvent(Event event)
    throws EventProcessingException, RetriesExhaustedWithDetailsException, InterruptedIOException {
  time = Integer.parseInt(event.getHeaders().get(MarauderParserConstants.MARAUDER_KEY_TIMESTAMP), 16);
  rowKey = getRowKey(constructDefaultRowKey(event.getHeaders(), time),
      event.getHeaders());
  // System.out.println(new Date((long) time * 1000));
  Put putReq = new Put(rowKey);
  offset = TimeUtils.getWindowOffsetTime(time, windowSize);
  column = new byte[6];
  System.arraycopy(ByteUtils.shortToByteMSB(offset), 0, column, 0, 2);
  System.arraycopy(ByteUtils.intToByteMSB(random.nextInt(1000000)), 0, column, 2, 4);
  putReq.add(MarauderParserConstants.MARAUDER_CF_HEADERS, column, event.getHeaders().toString().getBytes());
  performExtendedProcessing(putReq, event);
  return putReq;
}