This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Increment.addColumn. If you are unsure what Increment.addColumn does, how to call it, or want concrete samples, the curated method examples below may help. You can also explore the enclosing class, org.apache.hadoop.hbase.client.Increment, in more depth.
The following presents 15 code examples of Increment.addColumn, ordered by popularity by default.
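Before the examples, a minimal sketch of the basic pattern may help. It assumes the classic (pre-1.0) client API where HTable is constructed directly; the table, family, and qualifier names are hypothetical:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSketch {
  // Atomically adds 1 to a counter cell and returns the new value.
  public static long bumpCounter(Configuration conf) throws IOException {
    HTable table = new HTable(conf, "t1"); // hypothetical table name
    try {
      Increment inc = new Increment(Bytes.toBytes("row1"));
      // addColumn(family, qualifier, amount): queue one cell increment
      inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("visits"), 1L);
      Result result = table.increment(inc); // applied server-side, atomic per row
      return Bytes.toLong(result.getValue(Bytes.toBytes("f"), Bytes.toBytes("visits")));
    } finally {
      table.close();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(bumpCounter(HBaseConfiguration.create()));
  }
}
Each addColumn call queues one cell increment; all cells queued on a single Increment are applied atomically to that row, as several of the examples below rely on.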
Example 1: testIncrWithReadOnlyTable
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Test
public void testIncrWithReadOnlyTable() throws Exception {
  byte[] TABLE = Bytes.toBytes("readOnlyTable");
  this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
  boolean exceptionCaught = false;
  Increment inc = new Increment(Bytes.toBytes("somerow"));
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
  try {
    region.increment(inc);
  } catch (IOException e) {
    exceptionCaught = true;
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
  assertTrue(exceptionCaught);
}
Example 2: createIncrementOperation
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
/**
 * Builds a single {@link Increment} object for a row, with one or more cell
 * increments in that row.
 *
 * @param rowKey The rowKey of the row to be updated
 * @param cells  A list of objects containing the column qualifier and cell
 *               increment value
 * @return The completed {@link Increment} object
 */
private Increment createIncrementOperation(final RowKey rowKey, final List<CountCellIncrementHolder> cells) {
  LOGGER.trace(() -> String.format("createIncrementOperation called for rowKey: %s with cell count %s",
      rowKey.toString(),
      cells.size()));
  final Increment increment = new Increment(rowKey.asByteArray());
  // TODO HBase 2.0 has Increment.setReturnResults, which prevents the return of the
  // new value to improve performance. In our case we don't care about the new value,
  // so when we upgrade to HBase 2.0 we should add this line:
  // increment.setReturnResults(false);
  // If we have multiple CCIHs for the same rowKey/colQual, HBase seems to process
  // only one of them. Due to the way the data is passed to this method we should
  // not get multiple increments for the same rowKey/colQual, so we do not check
  // for it, given the cost of doing so.
  for (final CountCellIncrementHolder cell : cells) {
    increment.addColumn(EventStoreColumnFamily.COUNTS.asByteArray(), cell.getColumnQualifier().getBytes(),
        cell.getCellIncrementValue());
  }
  return increment;
}
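The comment above notes that HBase applies at most one increment per family/qualifier pair within a single Increment. Where the input may contain duplicate qualifiers, a safe pattern is to pre-aggregate amounts client-side before calling addColumn. A minimal sketch, with hypothetical names and input shape:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: an Increment keeps at most one amount per family/qualifier pair,
// so duplicate qualifiers must be summed client-side before addColumn.
public class PreAggregatedIncrement {
  /** qualifiers and amounts are parallel lists and may contain duplicates. */
  public static Increment build(byte[] row, byte[] family,
                                List<String> qualifiers, List<Long> amounts) {
    // Sum amounts per qualifier first...
    Map<String, Long> collapsed = new HashMap<>();
    for (int i = 0; i < qualifiers.size(); i++) {
      collapsed.merge(qualifiers.get(i), amounts.get(i), Long::sum);
    }
    // ...then add each qualifier exactly once.
    Increment increment = new Increment(row);
    for (Map.Entry<String, Long> e : collapsed.entrySet()) {
      increment.addColumn(family, Bytes.toBytes(e.getKey()), e.getValue());
    }
    return increment;
  }
}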
Example 3: incrementFromThrift
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
public static Increment incrementFromThrift(TIncrement in) throws IOException {
  Increment out = new Increment(in.getRow());
  for (TColumnIncrement column : in.getColumns()) {
    out.addColumn(column.getFamily(), column.getQualifier(), column.getAmount());
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  if (in.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
  }
  return out;
}
Example 4: testIncrementHook
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Test (timeout=300000)
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false});
    table.increment(inc);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
Example 5: testIncrementWithReturnResultsSetToFalse
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = wals.getWAL(tableName);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  Increment inc1 = new Increment(row1);
  inc1.setReturnResults(false);
  inc1.addColumn(FAMILY, col1, 1);
  Result res = region.increment(inc1);
  assertNull(res);
}
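Example 5 exercises setReturnResults(false) directly against an HRegion; the same option applies on the client path. A minimal sketch, assuming an HBase version where Increment.setReturnResults(boolean) is public API (as in the example above) and hypothetical names:
import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FireAndForgetIncrement {
  // Increment a counter without shipping the new value back to the client.
  public static void bump(Table table) throws IOException {
    Increment inc = new Increment(Bytes.toBytes("row1")); // hypothetical row
    inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("count"), 1L);
    inc.setReturnResults(false); // skip building/returning the post-increment value
    table.increment(inc);        // the returned Result carries no cells in this mode
  }
}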
Example 6: updateProfileCountsForSaleInHBase
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event) throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  // Credit the sale value to the buyer's purchase counters...
  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, event.getItemValue());
  actions.add(buyerValueIncrement);
  // ...and to the seller's sale counters.
  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);
  profileTable.batch(actions);
}
Example 7: logInProfileInHBase
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
public void logInProfileInHBase(long userId, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] profileRowKey = generateProfileRowKey(userId);
  // Reset the per-session counters from the previous log-in...
  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);
  // ...bump the log-in count...
  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);
  // ...and record the log-in time and IP address.
  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  actions.add(put);
  profileTable.batch(actions);
}
Example 8: createProfile
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] rowKey = generateProfileRowKey(userId);
  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL, Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);
  // Zero-amount increments initialize the counter cells for the new profile.
  Increment increment = new Increment(rowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);
  profileTable.batch(actions);
}
Example 9: testIncrementHook
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Test
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement"},
        tableName,
        new Boolean[] {false, false});
    table.increment(inc);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement"},
        tableName,
        new Boolean[] {true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
Example 10: run
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount*2);
      inc.addColumn(fam2, qual3, amount*3);
      region.increment(inc);
      // verify: make sure we only ever see completed increments
      Get g = new Get(row);
      Result result = region.get(g);
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2)));
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3)));
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Example 11: incrementColumnValue
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
    throws IOException {
  LOG.trace("incrementColumnValue(byte[], byte[], byte[], long)");
  Increment incr = new Increment(row);
  incr.addColumn(family, qualifier, amount);
  Result result = increment(incr);
  Cell cell = result.getColumnLatestCell(family, qualifier);
  if (cell == null) {
    LOG.error("Failed to find an incremented value in result of increment");
    throw new IOException(
        makeGenericExceptionMessage(
            "increment",
            options.getProjectId(),
            tableName.getQualifierAsString(),
            row));
  }
  return Bytes.toLong(CellUtil.cloneValue(cell));
}
Example 12: testSingleIncrement
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Test
public void testSingleIncrement() throws IOException {
  byte[] rowKey = dataHelper.randomData("rk1-");
  byte[] family = Bytes.toBytes("family");
  byte[] qualifier = Bytes.toBytes("qualifier");
  long amount = 1234;
  Increment incr = new Increment(rowKey);
  incr.addColumn(family, qualifier, amount);
  ReadModifyWriteRowRequest.Builder requestBuilder = incrementAdapter.adapt(incr);
  Assert.assertEquals(1, requestBuilder.getRulesCount());
  ReadModifyWriteRule rule = requestBuilder.getRules(0);
  Assert.assertEquals("qualifier", rule.getColumnQualifier().toStringUtf8());
  Assert.assertEquals("family", rule.getFamilyName());
  Assert.assertEquals(amount, rule.getIncrementAmount());
}
Example 13: run
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount*2);
      inc.addColumn(fam2, qual3, amount*3);
      inc.setDurability(Durability.ASYNC_WAL);
      region.increment(inc);
      // verify: make sure we only ever see completed increments
      Get g = new Get(row);
      Result result = region.get(g);
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, Bytes.toLong(result.getValue(fam1, qual2)));
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, Bytes.toLong(result.getValue(fam2, qual3)));
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Example 14: getIncrements
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public List<Increment> getIncrements() {
  List<Increment> incs = new LinkedList<Increment>();
  if (incCol != null) {
    Increment inc = new Increment(incrementRow);
    inc.addColumn(cf, incCol, 1);
    incs.add(inc);
  }
  return incs;
}
Example 15: getIncrements
import org.apache.hadoop.hbase.client.Increment; // import the class the method depends on
@Override
public List<Increment> getIncrements() {
  List<Increment> increments = Lists.newArrayList();
  // The event body is expected in "row:qualifier" form.
  String body = new String(event.getBody(), Charsets.UTF_8);
  String[] pieces = body.split(":");
  String row = pieces[0];
  String qualifier = pieces[1];
  Increment inc = new Increment(row.getBytes(Charsets.UTF_8));
  inc.addColumn(family, qualifier.getBytes(Charsets.UTF_8), 1L);
  increments.add(inc);
  return increments;
}