This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Increment. If you are unsure what exactly the Increment class does, how to use it, or what real-world usage looks like, the curated code examples below may help.
The Increment class belongs to the org.apache.hadoop.hbase.client package. The sections below present 15 code examples of the Increment class, sorted by popularity by default.
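Before diving into the examples, here is a minimal sketch of the client-side pattern most of them follow: build an Increment for a row, add one or more column increments, and submit it through a Table. This is only a sketch; the table, family, and qualifier names are placeholders, and an already-open Connection named connection is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch: atomically add 1 to a counter cell (all names are hypothetical).
// Assumes an open Connection named 'connection'.
try (Table table = connection.getTable(TableName.valueOf("my_table"))) {
  Increment inc = new Increment(Bytes.toBytes("row-1"));
  inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
  Result result = table.increment(inc);  // contains the post-increment cell value(s)
}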
Example 1: testIncrWithReadOnlyTable
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Test
public void testIncrWithReadOnlyTable() throws Exception {
  byte[] TABLE = Bytes.toBytes("readOnlyTable");
  this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
  boolean exceptionCaught = false;
  Increment inc = new Increment(Bytes.toBytes("somerow"));
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
  try {
    region.increment(inc);
  } catch (IOException e) {
    exceptionCaught = true;
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
  assertTrue(exceptionCaught);
}
Example 2: createIncrementOperation
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
/**
 * Builds a single {@link Increment} object for a row, with one-to-many cell
 * increments in that row.
 *
 * @param rowKey The rowKey of the row to be updated
 * @param cells A list of objects containing the column qualifier and cell
 *              increment value
 * @return The completed {@link Increment} object
 */
private Increment createIncrementOperation(final RowKey rowKey, final List<CountCellIncrementHolder> cells) {
  LOGGER.trace(() -> String.format("createIncrementOperation called for rowKey: %s with cell count %s",
      rowKey.toString(),
      cells.size()));
  final Increment increment = new Increment(rowKey.asByteArray());
  // TODO HBase 2.0 has Increment.setReturnResults to allow you to prevent the return of the
  // new value to improve performance. In our case we don't care about the new value, so when
  // we upgrade to HBase 2.0 we need to add this line in.
  // increment.setReturnResults(false);
  // If we have multiple CCIHs for the same rowKey/colQual then HBase seems to only process one of them.
  // Due to the way the data is passed through to this method we should not get multiple increments for the
  // same rowKey/colQual, so we will not check for it due to the cost of doing that.
  for (final CountCellIncrementHolder cell : cells) {
    increment.addColumn(EventStoreColumnFamily.COUNTS.asByteArray(), cell.getColumnQualifier().getBytes(),
        cell.getCellIncrementValue());
  }
  return increment;
}
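The RowKey and CountCellIncrementHolder types are project-specific and not shown here, but the underlying HBase pattern is simply to accumulate several column increments into one Increment per row and submit it once. A hedged sketch of that pattern using only the stock client API; the row key, family, and qualifiers are hypothetical, and table is an assumed, already-open org.apache.hadoop.hbase.client.Table:

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: fold several (qualifier -> delta) pairs for one row into a single Increment.
Increment increment = new Increment(Bytes.toBytes("event-row-1"));  // hypothetical row key
byte[] countsFamily = Bytes.toBytes("c");                           // hypothetical column family
increment.addColumn(countsFamily, Bytes.toBytes("q1"), 1L);
increment.addColumn(countsFamily, Bytes.toBytes("q2"), 5L);
increment.addColumn(countsFamily, Bytes.toBytes("q3"), 2L);
table.increment(increment);  // one RPC applies all three deltas atomically for the row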
Example 3: incrementFromThrift
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
public static Increment incrementFromThrift(TIncrement in) throws IOException {
  Increment out = new Increment(in.getRow());
  for (TColumnIncrement column : in.getColumns()) {
    out.addColumn(column.getFamily(), column.getQualifier(), column.getAmount());
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  if (in.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
  }
  return out;
}
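For context, a caller on the Thrift side would populate a TIncrement (thrift2 generated classes) before it reaches this converter. A hedged sketch, assuming the usual Thrift-generated setters; the row, family, and qualifier values are placeholders:

import java.util.Collections;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement;
import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: build the Thrift request, then convert it with the helper above.
TColumnIncrement columnIncrement = new TColumnIncrement();
columnIncrement.setFamily(Bytes.toBytes("cf"));          // hypothetical family
columnIncrement.setQualifier(Bytes.toBytes("counter"));  // hypothetical qualifier
columnIncrement.setAmount(1L);

TIncrement tIncrement = new TIncrement();
tIncrement.setRow(Bytes.toBytes("row-1"));               // hypothetical row key
tIncrement.setColumns(Collections.singletonList(columnIncrement));

Increment increment = incrementFromThrift(tIncrement);   // converter shown above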
Example 4: increment
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Override
public void increment(TIncrement tincrement) throws IOError, TException {
  if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
    throw new TException("Must supply a table and a row key; can't increment");
  }
  if (conf.getBoolean(COALESCE_INC_KEY, false)) {
    this.coalescer.queueIncrement(tincrement);
    return;
  }
  Table table = null;
  try {
    table = getTable(tincrement.getTable());
    Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
    table.increment(inc);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
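Note that the handler only queues increments into the coalescer when the corresponding boolean property is set in the Thrift server's configuration. A hedged sketch of flipping that switch; the literal key is an assumption (it is commonly the value behind COALESCE_INC_KEY in the Thrift server), so verify the constant in your HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: enable increment coalescing for the Thrift gateway before starting it.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.regionserver.thrift.coalesceIncrement", true);  // assumed key behind COALESCE_INC_KEY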
Example 5: preIncrementAfterRowLock
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Increment increment) throws IOException {
  if (increment.getAttribute(CHECK_COVERING_PERM) != null) {
    // We had failure with table, cf and q perm checks and now giving a chance for cell
    // perm check
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    AuthResult authResult = null;
    if (checkCoveringPermission(OpType.INCREMENT, c.getEnvironment(), increment.getRow(),
        increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) {
      authResult = AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, increment.getFamilyCellMap());
    } else {
      authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set",
          getActiveUser(), Action.WRITE, table, increment.getFamilyCellMap());
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
      throw new AccessDeniedException("Insufficient permissions " +
          authResult.toContextString());
    }
  }
  return null;
}
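This hook comes from HBase's AccessController, but any RegionObserver can intercept increments the same way. A minimal hedged sketch of a custom coprocessor; the class name and log line are hypothetical, and the hook signature follows the 1.x RegionObserver API used above, where returning null lets normal processing continue:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: observe every increment after the row lock is taken (hypothetical observer).
public class IncrementLoggingObserver extends BaseRegionObserver {
  @Override
  public Result preIncrementAfterRowLock(ObserverContext<RegionCoprocessorEnvironment> c,
      Increment increment) throws IOException {
    System.out.println("Increment on row " + Bytes.toStringBinary(increment.getRow()));
    return null;  // do not short-circuit; the region applies the increment normally
  }
}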
Example 6: verifyUserDeniedForIncrementMultipleVersions
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
private void verifyUserDeniedForIncrementMultipleVersions(final User user, final byte[] row,
    final byte[] q1) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Increment inc = new Increment(row);
          inc.setTimeRange(0, 127);
          inc.addColumn(TEST_FAMILY1, q1, 2L);
          t.increment(inc);
          fail(user.getShortName() + " cannot do the increment.");
        } catch (Exception e) {
        }
      }
      return null;
    }
  });
}
Example 7: testIncrementHook
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Test (timeout=300000)
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false}
        );
    table.increment(inc);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true}
        );
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
Example 8: testIncrementWithReturnResultsSetToFalse
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = wals.getWAL(tableName);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  Increment inc1 = new Increment(row1);
  inc1.setReturnResults(false);
  inc1.addColumn(FAMILY, col1, 1);
  Result res = region.increment(inc1);
  assertNull(res);
}
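The same flag works from the client side when you do not need the post-increment value, saving the server the cost of shipping a Result back. A minimal hedged sketch; table is an assumed, already-open Table, and whether the returned Result is null or simply empty can differ by HBase version:

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: fire-and-forget style increment against an assumed open Table named 'table'.
Increment inc = new Increment(Bytes.toBytes("row1"));
inc.setReturnResults(false);                                    // skip returning the new value
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col1"), 1L);  // hypothetical family/qualifier
Result result = table.increment(inc);                           // expect null or an empty Result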
Example 9: incrementColumnValue
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Override
public long incrementColumnValue(byte[] rowId, byte[] colFamily, byte[] colQualifier, long l)
    throws IOException {
  // ColumnManager validation
  Increment increment = null;
  if (includedInRepositoryProcessing) {
    increment = new Increment(rowId).addColumn(colFamily, colQualifier, l);
    if (mTableDescriptor.hasColDescriptorWithColDefinitionsEnforced()) {
      repository.validateColumns(mTableDescriptor, increment);
    }
  }
  // Standard HBase processing (with aliasing, if necessary)
  long returnedLong;
  if (includedInRepositoryProcessing
      && mTableDescriptor.hasColDescriptorWithColAliasesEnabled()) {
    returnedLong = wrappedTable.incrementColumnValue(rowId, colFamily,
        repository.getAlias(mTableDescriptor, colFamily, colQualifier), l);
  } else {
    returnedLong = wrappedTable.incrementColumnValue(rowId, colFamily, colQualifier, l);
  }
  // ColumnManager auditing
  if (includedInRepositoryProcessing) {
    repository.putColumnAuditorSchemaEntities(mTableDescriptor, increment);
  }
  return returnedLong;
}
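Underneath the wrapper, this is the standard single-cell convenience method on Table, which avoids building an Increment object at all and returns the new counter value directly. A minimal hedged sketch; table, family, and qualifier names are placeholders, and table is an assumed open Table:

import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: bump one counter cell by 1 and read back the new value ('table' is an assumed open Table).
long newValue = table.incrementColumnValue(
    Bytes.toBytes("row-1"), Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);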
Example 10: getFamilyQualifierToAliasMap
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
NavigableMap<byte[], NavigableMap<byte[], byte[]>> getFamilyQualifierToAliasMap(
    MTableDescriptor mTableDescriptor, Mutation mutation)
    throws IOException {
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap
      = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  Class<?> mutationClass = mutation.getClass();
  if (Append.class.isAssignableFrom(mutationClass)) {
    familyQualifierToAliasMap
        = getFamilyQualifierToAliasMap(mTableDescriptor, (Append)mutation);
  } else if (Increment.class.isAssignableFrom(mutationClass)) {
    familyQualifierToAliasMap
        = getFamilyQualifierToAliasMap(mTableDescriptor, (Increment)mutation);
  } else if (Delete.class.isAssignableFrom(mutationClass)
      || Put.class.isAssignableFrom(mutationClass)
      || RowMutations.class.isAssignableFrom(mutationClass)) {
    // ignore: familyQualifierToAliasMap not passed to alias-processing for these mutation-types
  }
  return familyQualifierToAliasMap;
}
Example 11: convertQualifiersToAliases
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
Row convertQualifiersToAliases(MTableDescriptor mTableDescriptor, final Row originalRow,
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap,
    int intForUniqueSignature)
    throws IOException {
  // Append, Delete, Get, Increment, Mutation, Put, RowMutations
  Class<?> originalRowClass = originalRow.getClass();
  if (Append.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Append)originalRow, familyQualifierToAliasMap);
  } else if (Delete.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Delete)originalRow);
  } else if (Get.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Get)originalRow, familyQualifierToAliasMap);
  } else if (Increment.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Increment)originalRow, familyQualifierToAliasMap);
  } else if (Put.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Put)originalRow);
  } else if (RowMutations.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (RowMutations)originalRow);
  }
  return null;
}
Example 12: testBatchProcessing
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
private void testBatchProcessing(Table table) throws IOException, InterruptedException {
  List<Row> actions = new LinkedList<>();
  actions.add(new Append(ROW_ID_02)
      .add(CF01, COLQUALIFIER03, Bytes.toBytes("appendedStringViaBatch")));
  actions.add(new Delete(ROW_ID_03).addColumn(CF01, COLQUALIFIER04));
  actions.add(new Increment(ROW_ID_02).addColumn(CF01, COLQUALIFIER05, 14));
  actions.add(new Put(ROW_ID_05)
      .addColumn(CF01, COLQUALIFIER04, TABLE_PUT_WITH_LIST)
      .addColumn(CF02, COLQUALIFIER02, TABLE_PUT_WITH_LIST));
  actions.add(new Get(ROW_ID_01).addColumn(CF01, COLQUALIFIER02));
  Object[] returnedObjects = new Object[actions.size()];
  table.batch(actions, returnedObjects);
  int index = 0;
  for (Object returnedObject : returnedObjects) {
    assertTrue("Table#batch action failed for " + actions.get(index).getClass().getSimpleName(),
        returnedObject != null);
    if (Get.class.isAssignableFrom(actions.get(index).getClass())) {
      Result resultFromGet = (Result)returnedObject;
      assertTrue("Table#batch Get action returned unexpected Result: expected <"
          + Bytes.toString(TABLE_PUT_WITH_LIST) + ">, returned <"
          + Bytes.toString(resultFromGet.getValue(CF01, COLQUALIFIER02)) + ">",
          Bytes.equals(TABLE_PUT_WITH_LIST, resultFromGet.getValue(CF01, COLQUALIFIER02)));
    }
    index++;
  }
}
Example 13: updateProfileCountsForSaleInHBase
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event)
    throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(buyerValueIncrement);
  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);
  profileTable.batch(actions);
}
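Examples 13 through 15 use the older HTablePool/HTableInterface API, which has been removed from newer HBase client releases. A hedged sketch of the equivalent batched increments against the Connection/Table API; the table and column names are placeholders, conf is an assumed Configuration, and the two-argument batch call collects the per-action results:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: issue two row increments in one batch with the Connection-based client.
try (Connection connection = ConnectionFactory.createConnection(conf);       // 'conf' is an assumed Configuration
     Table profileTable = connection.getTable(TableName.valueOf("profile"))) {
  List<Row> actions = new ArrayList<>();
  actions.add(new Increment(Bytes.toBytes("buyer-1"))
      .addColumn(Bytes.toBytes("p"), Bytes.toBytes("purchase_value"), 100L)); // hypothetical columns
  actions.add(new Increment(Bytes.toBytes("seller-1"))
      .addColumn(Bytes.toBytes("p"), Bytes.toBytes("sale_value"), 100L));
  Object[] results = new Object[actions.size()];
  profileTable.batch(actions, results);  // each slot holds the outcome of the corresponding action
}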
Example 14: logInProfileInHBase
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] profileRowKey = generateProfileRowKey(userId);
  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
      DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);
  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);
  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
      Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
      Bytes.toBytes(ipAddress));
  actions.add(put);
  profileTable.batch(actions);
}
Example 15: createProfile
import org.apache.hadoop.hbase.client.Increment; // import the required package/class
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] rowKey = generateProfileRowKey(userId);
  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL,
      Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
      Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
      Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);
  Increment increment = new Increment(rowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);
  profileTable.batch(actions);
}