This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.client.Append. If you are struggling with questions such as what exactly the Java Append class does, how to use it, or what its usage looks like in practice, the curated code examples below may help.
The Append class belongs to the org.apache.hadoop.hbase.client package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
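Before the examples, here is a minimal end-to-end sketch of the typical Append workflow. It is an illustrative sketch, not taken from the examples below: it assumes a reachable cluster and an existing table "t1" with column family "cf" (both hypothetical names), and it uses addColumn, the HBase 2.x method name (1.x clients use add(family, qualifier, value) instead).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster and an existing table "t1" with family "cf".
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("t1"))) {
      Append append = new Append(Bytes.toBytes("row1"));
      // Atomically appends "suffix" to whatever value cf:q currently holds.
      append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("suffix"));
      append.setReturnResults(true); // ask the server to return the post-append cell
      Result result = table.append(append);
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}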
Example 1: testAppendHook
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Test
public void testAppendHook() throws IOException {
  final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
  Table table = util.createTable(tableName, new byte[][] { A, B, C });
  try {
    Append app = new Append(Bytes.toBytes(0));
    app.addColumn(A, A, A);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
        new Boolean[] { false, false, false });
    table.append(app);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
        new Boolean[] { true, true, true });
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
Example 2: preAppend
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Override
public Result preAppend(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Append append) throws IOException {
  NavigableMap<byte[], List<Cell>> map = append.getFamilyCellMap();
  for (Map.Entry<byte[], List<Cell>> entry : map.entrySet()) {
    for (Cell cell : entry.getValue()) {
      String appendStr = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength());
      if (appendStr.equals("b")) {
        tr10 = append.getTimeRange();
      } else if (appendStr.equals("c") && !append.getTimeRange().isAllTime()) {
        tr2 = append.getTimeRange();
      }
    }
  }
  return null;
}
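The hook above captures the Append's TimeRange on the server side. For context, the client controls that range before submitting the mutation; a minimal sketch (hypothetical row key, reusing the A family/qualifier constants and table handle from the test setup) could look like this:
Append app = new Append(Bytes.toBytes("row"));
app.addColumn(A, A, Bytes.toBytes("c"));
// A non-default range makes getTimeRange().isAllTime() return false in preAppend.
app.setTimeRange(0, System.currentTimeMillis());
table.append(app);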
Example 3: appendFromThrift
import org.apache.hadoop.hbase.client.Append; // import the required package/class
public static Append appendFromThrift(TAppend append) throws IOException {
  Append out = new Append(append.getRow());
  for (TColumnValue column : append.getColumns()) {
    out.add(column.getFamily(), column.getQualifier(), column.getValue());
  }
  if (append.isSetAttributes()) {
    addAttributes(out, append.getAttributes());
  }
  if (append.isSetDurability()) {
    out.setDurability(durabilityFromThrift(append.getDurability()));
  }
  if (append.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(append.getCellVisibility().getExpression()));
  }
  return out;
}
Example 4: appendFromThrift
import org.apache.hadoop.hbase.client.Append; // import the required package/class
/**
 * From a {@link TAppend} create an {@link Append}.
 * @param tappend the Thrift version of an append.
 * @return an Append that the {@link TAppend} represented.
 */
public static Append appendFromThrift(TAppend tappend) {
  Append append = new Append(tappend.getRow());
  List<ByteBuffer> columns = tappend.getColumns();
  List<ByteBuffer> values = tappend.getValues();
  if (columns.size() != values.size()) {
    throw new IllegalArgumentException(
        "Sizes of columns and values in tappend object are not matching");
  }
  int length = columns.size();
  for (int i = 0; i < length; i++) {
    byte[][] famAndQf = KeyValue.parseColumn(getBytes(columns.get(i)));
    append.add(famAndQf[0], famAndQf[1], getBytes(values.get(i)));
  }
  return append;
}
Example 5: preAppendAfterRowLock
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Override
public Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Append append) throws IOException {
  if (append.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    AuthResult authResult = null;
    if (checkCoveringPermission(OpType.APPEND, c.getEnvironment(), append.getRow(),
        append.getFamilyCellMap(), HConstants.LATEST_TIMESTAMP, Action.WRITE)) {
      authResult = AuthResult.allow(OpType.APPEND.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, append.getFamilyCellMap());
    } else {
      authResult = AuthResult.deny(OpType.APPEND.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, append.getFamilyCellMap());
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " +
          authResult.toContextString());
    }
  }
  return null;
}
Example 6: testAppend
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Test (timeout=180000)
public void testAppend() throws Exception {
  AccessTestAction appendAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      byte[] row = TEST_ROW;
      byte[] qualifier = TEST_QUALIFIER;
      Put put = new Put(row);
      put.add(TEST_FAMILY, qualifier, Bytes.toBytes(1));
      Append append = new Append(row);
      append.add(TEST_FAMILY, qualifier, Bytes.toBytes(2));
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.put(put);
        t.append(append);
      }
      return null;
    }
  };
  verifyAllowed(appendAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW,
      USER_GROUP_WRITE);
  verifyDenied(appendAction, USER_RO, USER_NONE, USER_GROUP_CREATE, USER_GROUP_READ,
      USER_GROUP_ADMIN);
}
Example 7: testAppendHook
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Test (timeout=300000)
public void testAppendHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Append app = new Append(Bytes.toBytes(0));
    app.add(A, A, A);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false}
    );
    table.append(app);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true}
    );
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
Example 8: testAppendWithReadOnlyTable
import org.apache.hadoop.hbase.client.Append; // import the required package/class
@Test
public void testAppendWithReadOnlyTable() throws Exception {
  byte[] TABLE = Bytes.toBytes("readOnlyTable");
  this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
  boolean exceptionCaught = false;
  Append append = new Append(Bytes.toBytes("somerow"));
  append.setDurability(Durability.SKIP_WAL);
  append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
      Bytes.toBytes("somevalue"));
  try {
    region.append(append);
  } catch (IOException e) {
    exceptionCaught = true;
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
  assertTrue(exceptionCaught);
}
Example 9: getFamilyQualifierToAliasMap
import org.apache.hadoop.hbase.client.Append; // import the required package/class
NavigableMap<byte[], NavigableMap<byte[], byte[]>> getFamilyQualifierToAliasMap(
    MTableDescriptor mTableDescriptor, Mutation mutation)
    throws IOException {
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap
      = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  Class<?> mutationClass = mutation.getClass();
  if (Append.class.isAssignableFrom(mutationClass)) {
    familyQualifierToAliasMap
        = getFamilyQualifierToAliasMap(mTableDescriptor, (Append) mutation);
  } else if (Increment.class.isAssignableFrom(mutationClass)) {
    familyQualifierToAliasMap
        = getFamilyQualifierToAliasMap(mTableDescriptor, (Increment) mutation);
  } else if (Delete.class.isAssignableFrom(mutationClass)
      || Put.class.isAssignableFrom(mutationClass)
      || RowMutations.class.isAssignableFrom(mutationClass)) {
    // ignore: familyQualifierToAliasMap not passed to alias-processing for these mutation-types
  }
  return familyQualifierToAliasMap;
}
Example 10: convertQualifiersToAliases
import org.apache.hadoop.hbase.client.Append; // import the required package/class
Row convertQualifiersToAliases(MTableDescriptor mTableDescriptor, final Row originalRow,
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap,
    int intForUniqueSignature)
    throws IOException {
  // Append, Delete, Get, Increment, Mutation, Put, RowMutations
  Class<?> originalRowClass = originalRow.getClass();
  if (Append.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Append) originalRow, familyQualifierToAliasMap);
  } else if (Delete.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Delete) originalRow);
  } else if (Get.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Get) originalRow, familyQualifierToAliasMap);
  } else if (Increment.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Increment) originalRow, familyQualifierToAliasMap);
  } else if (Put.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Put) originalRow);
  } else if (RowMutations.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (RowMutations) originalRow);
  }
  return null;
}
Example 11: testBatchProcessing
import org.apache.hadoop.hbase.client.Append; // import the required package/class
private void testBatchProcessing(Table table) throws IOException, InterruptedException {
  List<Row> actions = new LinkedList<>();
  actions.add(new Append(ROW_ID_02)
      .add(CF01, COLQUALIFIER03, Bytes.toBytes("appendedStringViaBatch")));
  actions.add(new Delete(ROW_ID_03).addColumn(CF01, COLQUALIFIER04));
  actions.add(new Increment(ROW_ID_02).addColumn(CF01, COLQUALIFIER05, 14));
  actions.add(new Put(ROW_ID_05)
      .addColumn(CF01, COLQUALIFIER04, TABLE_PUT_WITH_LIST)
      .addColumn(CF02, COLQUALIFIER02, TABLE_PUT_WITH_LIST));
  actions.add(new Get(ROW_ID_01).addColumn(CF01, COLQUALIFIER02));
  Object[] returnedObjects = new Object[actions.size()];
  table.batch(actions, returnedObjects);
  int index = 0;
  for (Object returnedObject : returnedObjects) {
    assertTrue("Table#batch action failed for " + actions.get(index).getClass().getSimpleName(),
        returnedObject != null);
    if (Get.class.isAssignableFrom(actions.get(index).getClass())) {
      Result resultFromGet = (Result) returnedObject;
      assertTrue("Table#batch Get action returned unexpected Result: expected <"
          + Bytes.toString(TABLE_PUT_WITH_LIST) + ">, returned <"
          + Bytes.toString(resultFromGet.getValue(CF01, COLQUALIFIER02)) + ">",
          Bytes.equals(TABLE_PUT_WITH_LIST, resultFromGet.getValue(CF01, COLQUALIFIER02)));
    }
    index++;
  }
}
Example 12: preAppend
import org.apache.hadoop.hbase.client.Append; // import the required package/class
/**
 * @param append append object
 * @return result to return to client if default operation should be
 * bypassed, null otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public Result preAppend(Append append) throws IOException {
  boolean bypass = false;
  Result result = null;
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        result = ((RegionObserver) env.getInstance()).preAppend(ctx, append);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      }
      bypass |= ctx.shouldBypass();
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
  return bypass ? result : null;
}
Example 13: postAppend
import org.apache.hadoop.hbase.client.Append; // import the required package/class
/**
 * @param append Append object
 * @param result the result returned by the append operation
 * @throws IOException if an error occurred on the coprocessor
 */
public void postAppend(final Append append, Result result) throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        ((RegionObserver) env.getInstance()).postAppend(ctx, append, result);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      }
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
}
Example 14: testAppendWithReadOnlyTable
import org.apache.hadoop.hbase.client.Append; // import the required package/class
public void testAppendWithReadOnlyTable() throws Exception {
  byte[] TABLE = Bytes.toBytes("readOnlyTable");
  this.region = initHRegion(TABLE, getName(), conf, true, Bytes.toBytes("somefamily"));
  boolean exceptionCaught = false;
  Append append = new Append(Bytes.toBytes("somerow"));
  append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
      Bytes.toBytes("somevalue"));
  try {
    region.append(append, false);
  } catch (IOException e) {
    exceptionCaught = true;
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
  assertTrue(exceptionCaught);
}
Example 15: issueRequest
import org.apache.hadoop.hbase.client.Append; // import the required package/class
ListenableFuture<? extends GeneratedMessage> issueRequest(Row row) {
  if (row instanceof Put) {
    return issuePutRequest((Put) row);
  } else if (row instanceof Delete) {
    return issueDeleteRequest((Delete) row);
  } else if (row instanceof Append) {
    return issueAppendRequest((Append) row);
  } else if (row instanceof Increment) {
    return issueIncrementRequest((Increment) row);
  } else if (row instanceof Get) {
    return issueGetRequest((Get) row);
  } else if (row instanceof RowMutations) {
    return issueRowMutationsRequest((RowMutations) row);
  }
  LOG.error("Encountered unknown action type %s", row.getClass());
  return Futures.immediateFailedFuture(
      new IllegalArgumentException("Encountered unknown action type: " + row.getClass()));
}