This article collects typical usage examples of the Java class org.apache.hadoop.hbase.DoNotRetryIOException: what the class is for and how it is used in real code. If you have been wondering how to work with DoNotRetryIOException, the curated examples below may help.
The DoNotRetryIOException class belongs to the org.apache.hadoop.hbase package. Fifteen code examples are shown below, sorted by popularity by default.
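For orientation before the examples: DoNotRetryIOException extends IOException and tells the HBase client machinery that an operation failed permanently, so it should fail fast instead of walking through its retry schedule. A minimal, self-contained sketch of that contract (the validate method and its message are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;

public class DoNotRetryDemo {
  // A server-side check throws DoNotRetryIOException for a request that can
  // never succeed; plain IOException is left for transient trouble.
  static void validate(byte[] value) throws IOException {
    if (value == null) {
      throw new DoNotRetryIOException("null value is never accepted");
    }
  }

  public static void main(String[] args) {
    try {
      validate(null);
    } catch (DoNotRetryIOException e) {
      System.out.println("permanent failure, do not retry: " + e.getMessage());
    } catch (IOException e) {
      System.out.println("transient failure, safe to retry: " + e.getMessage());
    }
  }
}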
Example 1: testCompression
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
public static void testCompression(Compression.Algorithm algo)
throws IOException {
if (compressionTestResults[algo.ordinal()] != null) {
if (compressionTestResults[algo.ordinal()]) {
return; // already passed the test; don't run it again.
} else {
// failed.
throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" +
" previously failed test.");
}
}
try {
Compressor c = algo.getCompressor();
algo.returnCompressor(c);
compressionTestResults[algo.ordinal()] = true; // passes
} catch (Throwable t) {
compressionTestResults[algo.ordinal()] = false; // failure
throw new DoNotRetryIOException(t);
}
}
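A hedged usage sketch for the method above: the Boolean array caches one verdict per algorithm, so a failed codec keeps failing fast on every later call (the LOG field is hypothetical; Compression.Algorithm.GZ is a real constant):

try {
  testCompression(Compression.Algorithm.GZ);
  testCompression(Compression.Algorithm.GZ); // returns immediately: result is cached
} catch (DoNotRetryIOException e) {
  // the codec is missing or broken on this node; retrying cannot help
  LOG.warn("GZ compression unavailable", e);
}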
Example 2: StoreScanner
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we
* are not in a compaction.
*
* @param store which store to scan
* @param scanInfo the scan metadata for this store
* @param scan the scan specification
* @param columns which columns we are scanning
* @param readPt the read point of the current scan
* @throws IOException
*/
public StoreScanner(Store store, ScanInfo scanInfo, Scan scan, final NavigableSet<byte[]> columns,
long readPt) throws IOException {
this(store, scan, scanInfo, columns, readPt, scan.getCacheBlocks());
if (columns != null && scan.isRaw()) {
throw new DoNotRetryIOException("Cannot specify any column for a raw scan");
}
matcher = new ScanQueryMatcher(scan, scanInfo, columns, ScanType.USER_SCAN, Long.MAX_VALUE,
HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, now, store.getCoprocessorHost());
this.store.addChangedReaderObserver(this);
// Pass columns to try to filter out unnecessary StoreFiles.
List<KeyValueScanner> scanners = getScannersNoCompaction();
// Seek all scanners to the start of the Row (or if the exact matching row
// key does not exist, then to the start of the next matching Row).
// Always check bloom filter to optimize the top row seek for delete
// family marker.
seekScanners(scanners, matcher.getStartKey(), explicitColumnQuery && lazySeekEnabledGlobally,
parallelSeekEnabled);
// set storeLimit
this.storeLimit = scan.getMaxResultsPerColumnFamily();
// set rowOffset
this.storeOffset = scan.getRowOffsetPerColumnFamily();
// Combine all seeked scanners with a heap
resetKVHeap(scanners, store.getComparator());
}
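The raw-scan guard above is what a client trips over when combining Scan.setRaw(true) with an explicit column. A hedged sketch of the client-side view (table, FAMILY, and QUALIFIER are hypothetical):

Scan scan = new Scan();
scan.setRaw(true);                 // ask for delete markers as well
scan.addColumn(FAMILY, QUALIFIER); // explicit column + raw scan: rejected
// The server answers with DoNotRetryIOException("Cannot specify any column
// for a raw scan"); use addFamily(FAMILY) or no column restriction instead.
ResultScanner rs = table.getScanner(scan);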
Example 3: buildRegionAction
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Create a protocol buffer MultiRequest for row mutations.
* Does not propagate Action absolute position. Does not set the atomic flag on the created
* RegionAction. Caller should do that if wanted.
* @param regionName
* @param rowMutations
* @return a data-laden RegionAction.Builder
* @throws IOException
*/
public static RegionAction.Builder buildRegionAction(final byte [] regionName,
final RowMutations rowMutations)
throws IOException {
RegionAction.Builder builder =
getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
for (Mutation mutation: rowMutations.getMutations()) {
MutationType mutateType = null;
if (mutation instanceof Put) {
mutateType = MutationType.PUT;
} else if (mutation instanceof Delete) {
mutateType = MutationType.DELETE;
} else {
throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
mutation.getClass().getName());
}
mutationBuilder.clear();
MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
actionBuilder.clear();
actionBuilder.setMutation(mp);
builder.addAction(actionBuilder.build());
}
return builder;
}
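A hedged sketch of driving buildRegionAction: only Put and Delete may appear in the RowMutations, so anything else reaching the loop lands in the DoNotRetryIOException branch (row, FAMILY, QUAL, and regionName are hypothetical):

byte[] row = Bytes.toBytes("row-1");
RowMutations rm = new RowMutations(row);
rm.add(new Put(row).addColumn(FAMILY, QUAL, Bytes.toBytes("v")));
rm.add(new Delete(row).addColumns(FAMILY, QUAL));
RegionAction.Builder action = buildRegionAction(regionName, rm);
// A Mutation subclass other than Put or Delete would throw
// DoNotRetryIOException("RowMutations supports only put and delete, ...").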
Example 4: buildNoDataRegionAction
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Create a protocol buffer MultiRequest for row mutations that does not hold data. Data/Cells
* are carried outside of protobuf. Returns references to the Cells in the <code>cells</code>
* param. Does not propagate Action absolute position. Does not set the atomic flag on the
* created RegionAction. Caller should do that if wanted.
* @param regionName
* @param rowMutations
* @param cells Returns here a list of the Cells in rowMutations, wrapped as CellScannable
* @return a region mutation minus data
* @throws IOException
*/
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
final RowMutations rowMutations, final List<CellScannable> cells,
final RegionAction.Builder regionActionBuilder,
final ClientProtos.Action.Builder actionBuilder,
final MutationProto.Builder mutationBuilder)
throws IOException {
for (Mutation mutation: rowMutations.getMutations()) {
MutationType type = null;
if (mutation instanceof Put) {
type = MutationType.PUT;
} else if (mutation instanceof Delete) {
type = MutationType.DELETE;
} else {
throw new DoNotRetryIOException("RowMutations supports only put and delete, not " +
mutation.getClass().getName());
}
mutationBuilder.clear();
MutationProto mp = ProtobufUtil.toMutationNoData(type, mutation, mutationBuilder);
cells.add(mutation);
actionBuilder.clear();
regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
}
return regionActionBuilder;
}
Example 5: toFilter
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Convert a protocol buffer Filter to a client Filter
*
* @param proto the protocol buffer Filter to convert
* @return the converted Filter
* @throws IOException if the filter class cannot be loaded or deserialization fails
*/
@SuppressWarnings("unchecked")
public static Filter toFilter(FilterProtos.Filter proto) throws IOException {
String type = proto.getName();
final byte [] value = proto.getSerializedFilter().toByteArray();
String funcName = "parseFrom";
try {
Class<? extends Filter> c =
(Class<? extends Filter>)Class.forName(type, true, CLASS_LOADER);
Method parseFrom = c.getMethod(funcName, byte[].class);
if (parseFrom == null) {
throw new IOException("Unable to locate function: " + funcName + " in type: " + type);
}
return (Filter)parseFrom.invoke(c, value);
} catch (Exception e) {
// Either we couldn't instantiate the method object, or "parseFrom" failed.
// In either case, let's not retry.
throw new DoNotRetryIOException(e);
}
}
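A hedged round-trip sketch for toFilter: HBase's ProtobufUtil also offers the opposite conversion, so a client filter can be serialized and restored (the prefix bytes are hypothetical):

Filter original = new PrefixFilter(Bytes.toBytes("user-"));
FilterProtos.Filter proto = ProtobufUtil.toFilter(original); // client -> proto
Filter restored = ProtobufUtil.toFilter(proto);              // proto -> client
// A proto naming a class with no static parseFrom(byte[]), or whose parseFrom
// throws, surfaces here as DoNotRetryIOException rather than a retryable error.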
Example 6: classifyExs
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
public static Map<String, Integer> classifyExs(List<Throwable> ths) {
Map<String, Integer> cls = new HashMap<String, Integer>();
for (Throwable t : ths) {
if (t == null) continue;
String name = "";
if (t instanceof DoNotRetryIOException) {
name = t.getMessage();
} else {
name = t.getClass().getSimpleName();
}
Integer i = cls.get(name);
if (i == null) {
i = 0;
}
i += 1;
cls.put(name, i);
}
return cls;
}
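A usage sketch for classifyExs (the throwables are hypothetical): DoNotRetryIOExceptions are tallied by message, everything else by simple class name, so permanent failures keep their detail in the count:

List<Throwable> failures = Arrays.asList(
    new DoNotRetryIOException("Compression algorithm 'lzo' previously failed test."),
    new DoNotRetryIOException("Compression algorithm 'lzo' previously failed test."),
    new java.net.SocketTimeoutException("read timed out"));
Map<String, Integer> tally = classifyExs(failures);
// {Compression algorithm 'lzo' previously failed test.=2, SocketTimeoutException=1}
for (Map.Entry<String, Integer> e : tally.entrySet()) {
  System.out.println(e.getValue() + " x " + e.getKey());
}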
Example 7: manageError
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Checks whether we can retry and acts accordingly: logs and sets the error status.
*
* @param originalIndex the position in the list sent
* @param row the row
* @param canRetry if not Retry.YES, we won't retry whatever the settings
* @param throwable the throwable, if any (can be null)
* @param server the location, if any (can be null)
* @return Retry.YES if the action can be retried, a Retry.NO_* reason otherwise
*/
public Retry manageError(int originalIndex, Row row, Retry canRetry,
Throwable throwable, ServerName server) {
if (canRetry == Retry.YES
&& throwable != null && (throwable instanceof DoNotRetryIOException ||
throwable instanceof NeedUnmanagedConnectionException)) {
canRetry = Retry.NO_NOT_RETRIABLE;
}
if (canRetry != Retry.YES) {
// Batch.Callback<Res> was not called on failure in 0.94. We keep this.
setError(originalIndex, row, throwable, server);
} else if (isActionComplete(originalIndex, row)) {
canRetry = Retry.NO_OTHER_SUCCEEDED;
}
return canRetry;
}
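Distilled to a predicate, the classification above is simply: a DoNotRetryIOException (or a NeedUnmanagedConnectionException) is permanent, everything else may be retried. A hedged, hypothetical helper showing just that rule:

// Hypothetical helper mirroring manageError's classification; the real method
// also consults the incoming Retry state and whether another replica already succeeded.
static boolean isPermanentFailure(Throwable t) {
  return t instanceof DoNotRetryIOException
      || t instanceof NeedUnmanagedConnectionException;
}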
Example 8: testOnServerWithNoProcedureSupport
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* When a new client with procedure support asks an old master without procedure support
* for the procedure result, we get a DoNotRetryIOException (wrapping an
* UnsupportedOperationException). The future should trap that and fall back to
* waitOperationResult().
*
* This happens when the operation call lands on a "new master" but, while we are waiting
* for the operation to complete, we fail over to an "old master".
*/
@Test(timeout=60000)
public void testOnServerWithNoProcedureSupport() throws Exception {
HBaseAdmin admin = Mockito.mock(HBaseAdmin.class);
TestFuture f = new TestFuture(admin, 100L) {
@Override
protected GetProcedureResultResponse getProcedureResult(
final GetProcedureResultRequest request) throws IOException {
super.getProcedureResult(request);
throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult"));
}
};
f.get(1, TimeUnit.MINUTES);
assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled());
assertFalse("unexpected convertResult() called", f.wasConvertResultCalled());
assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled());
assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled());
}
Example 9: checkTimestamps
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
private void checkTimestamps(final Map<byte[], List<KeyValue>> familyMap, long now)
throws DoNotRetryIOException {
if (timestampSlop == HConstants.LATEST_TIMESTAMP) {
return;
}
long maxTs = now + timestampSlop;
for (List<KeyValue> kvs : familyMap.values()) {
for (KeyValue kv : kvs) {
// see if the user-side TS is out of range. latest = server-side
if (!kv.isLatestTimestamp() && kv.getTimestamp() > maxTs) {
throw new DoNotRetryIOException("Timestamp for KV out of range " + kv + " (too.new="
+ timestampSlop + ")");
}
}
}
}
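A hedged client-side sketch of what trips checkTimestamps: a cell stamped beyond now + slop. The slop comes from the hbase.hregion.keyvalue.timestamp.slop.millisecs setting and is unbounded by default, so this only throws once a slop is configured (table, FAMILY, and QUAL are hypothetical):

long farFuture = System.currentTimeMillis() + 365L * 24 * 60 * 60 * 1000; // ~1 year ahead
Put put = new Put(Bytes.toBytes("row-1"));
put.add(FAMILY, QUAL, farFuture, Bytes.toBytes("v"));
// With a slop configured on the region, the server rejects this with
// DoNotRetryIOException("Timestamp for KV out of range ...").
table.put(put);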
Example 10: testCheckAndPut_wrongRowInPut
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
public void testCheckAndPut_wrongRowInPut() throws IOException {
this.region = initHRegion(tableName, this.getName(), conf, COLUMNS);
try {
Put put = new Put(row2);
put.add(fam1, qual1, value1);
try {
boolean res = region.checkAndMutate(row, fam1, qual1, CompareOp.EQUAL,
new BinaryComparator(value2), put, null, false);
fail();
} catch (DoNotRetryIOException expected) {
// expected exception.
}
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 11: testmutateRowsWithLocks_wrongCF
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
public void testmutateRowsWithLocks_wrongCF() throws IOException {
this.region = initHRegion(tableName, this.getName(), conf, fam1, fam2);
try {
Put put = new Put(row2);
put.add(fam3, qual1, value1);
RowMutations rm = new RowMutations(row2);
rm.add(put);
try {
region.mutateRow(rm);
fail();
} catch (DoNotRetryIOException expected) {
// expected exception.
LOG.debug("Caught expected exception: " + expected.getMessage());
}
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 12: testCheckAndPutDiffRow
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Requirement 7.2 - Throws an IOException if the check is for a row other than the one in the
* mutation attempt.
*/
@Test
public void testCheckAndPutDiffRow() throws IOException {
// Initialize
Table table = getConnection().getTable(TABLE_NAME);
byte[] rowKey1 = dataHelper.randomData("rowKey-");
byte[] rowKey2 = dataHelper.randomData("rowKey-");
byte[] qual = dataHelper.randomData("qualifier-");
byte[] value = dataHelper.randomData("value-");
// checkAndPut where the check row differs from the Put's row
Put put = new Put(rowKey1).addColumn(COLUMN_FAMILY, qual, value);
expectedException.expect(DoNotRetryIOException.class);
expectedException.expectMessage("Action's getRow must match the passed row");
table.checkAndPut(rowKey2, COLUMN_FAMILY, qual, null, put);
table.close();
}
Example 13: testCheckAndDeleteDiffRow
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
@Test
public void testCheckAndDeleteDiffRow() throws IOException {
// Initialize
Table table = getConnection().getTable(TABLE_NAME);
byte[] rowKey1 = dataHelper.randomData("rowKey-");
byte[] rowKey2 = dataHelper.randomData("rowKey-");
byte[] qual = dataHelper.randomData("qualifier-");
// checkAndDelete where the check row differs from the Delete's row
Delete delete = new Delete(rowKey1).addColumns(COLUMN_FAMILY, qual);
expectedException.expect(DoNotRetryIOException.class);
expectedException.expectMessage("Action's getRow must match the passed row");
table.checkAndDelete(rowKey2, COLUMN_FAMILY, qual, null, delete);
table.close();
}
Example 14: testFailOnIncrementInt
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Requirement 6.6 - Increment should fail on non-64-bit values, and succeed on any 64-bit value.
*/
@Test
@Category(KnownGap.class)
public void testFailOnIncrementInt() throws IOException {
// Initialize
Table table = getConnection().getTable(TABLE_NAME);
byte[] rowKey = dataHelper.randomData("testrow-");
byte[] qual = dataHelper.randomData("qual-");
int value = new Random().nextInt();
Put put = new Put(rowKey).addColumn(COLUMN_FAMILY, qual, Bytes.toBytes(value));
table.put(put);
// Increment
Increment increment = new Increment(rowKey).addColumn(COLUMN_FAMILY, qual, 1L);
expectedException.expect(DoNotRetryIOException.class);
expectedException.expectMessage("Attempted to increment field that isn't 64 bits wide");
table.increment(increment);
}
Example 15: testFailOnIncrementString
import org.apache.hadoop.hbase.DoNotRetryIOException; // import the required package/class
/**
* Requirement 6.6
*/
@Test
@Category(KnownGap.class)
public void testFailOnIncrementString() throws IOException {
// Initialize
Table table = getConnection().getTable(TABLE_NAME);
byte[] rowKey = dataHelper.randomData("testrow-");
byte[] qual = dataHelper.randomData("qual-");
byte[] value = dataHelper.randomData("value-");
Put put = new Put(rowKey).addColumn(COLUMN_FAMILY, qual, value);
table.put(put);
// Increment
Increment increment = new Increment(rowKey).addColumn(COLUMN_FAMILY, qual, 1L);
expectedException.expect(DoNotRetryIOException.class);
expectedException.expectMessage("Attempted to increment field that isn't 64 bits wide");
table.increment(increment);
}