This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Row. If you are unsure what the Row class is for or how to use it, the curated examples below may help.
The Row class belongs to the org.apache.hadoop.hbase.client package. A total of 15 code examples are shown below, sorted by popularity by default.
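Before the examples, note that Row is the common supertype of the HBase client operations (Get, Put, Delete, Increment, Append, RowMutations), which is what allows heterogeneous actions to be submitted through a single Table#batch call. The following minimal, self-contained sketch illustrates that pattern; the table name "demo_table" and the column names are illustrative assumptions, not taken from the examples below.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowBatchSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      byte[] cf = Bytes.toBytes("cf");
      List<Row> actions = new ArrayList<>();
      // Different operation types can share one list because they all implement Row.
      actions.add(new Put(Bytes.toBytes("row1")).addColumn(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1")));
      actions.add(new Delete(Bytes.toBytes("row2")));
      actions.add(new Get(Bytes.toBytes("row1")).addColumn(cf, Bytes.toBytes("q1")));
      // One result slot per action; failed actions surface as Throwables or nulls in their slots.
      Object[] results = new Object[actions.size()];
      table.batch(actions, results);
    }
  }
}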
Example 1: getActions
import org.apache.hadoop.hbase.client.Row; // import the required package/class
@Override
public List<Row> getActions() throws FlumeException {
  List<Row> actions = new LinkedList<Row>();
  if (plCol != null) {
    byte[] rowKey;
    try {
      if (keyType == KeyType.TS) {
        rowKey = SimpleRowKeyGenerator.getTimestampKey(rowPrefix);
      } else if (keyType == KeyType.RANDOM) {
        rowKey = SimpleRowKeyGenerator.getRandomKey(rowPrefix);
      } else if (keyType == KeyType.TSNANO) {
        rowKey = SimpleRowKeyGenerator.getNanoTimestampKey(rowPrefix);
      } else {
        rowKey = SimpleRowKeyGenerator.getUUIDKey(rowPrefix);
      }
      Put put = new Put(rowKey);
      put.add(cf, plCol, payload);
      actions.add(put);
    } catch (Exception e) {
      throw new FlumeException("Could not get row key!", e);
    }
  }
  return actions;
}
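For context, the list returned by getActions() is what a Flume HBase sink ultimately hands to HBase in one round trip. A hedged sketch of that consumption pattern follows; the 'serializer' and 'table' variables are assumed to be initialized elsewhere in the sink and are not part of the snippet above.

// Sketch only: drain the serializer and apply its actions in one batch.
List<Row> actions = serializer.getActions();
Object[] results = new Object[actions.size()];
table.batch(actions, results);  // any mix of Row subtypes can go through the same call
// Flume's HbaseEventSerializer also exposes getIncrements(); those are applied separately.
for (Increment increment : serializer.getIncrements()) {
  table.increment(increment);
}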
Example 2: call
import org.apache.hadoop.hbase.client.Row; // import the required package/class
/**
 * Performs an HBase batch operation.
 */
public void call() throws Exception {
  List<Row> rowList = new ArrayList<>(batchSize);
  final Object[] results = new Object[batchSize];
  for (T object : objectCollection) {
    final Row row = objectMapper.apply(object);
    rowList.add(row);
    // batch size limit reached: flush index data to HBase
    if (rowList.size() >= batchSize) {
      table.batch(rowList, results);
      throwIfBatchFailed(results);
      rowList.clear();
    }
  }
  // save the remaining index data
  if (!rowList.isEmpty()) {
    final Object[] errors = new Object[rowList.size()];
    table.batch(rowList, errors);
    throwIfBatchFailed(errors);
  }
}
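The throwIfBatchFailed helper is not shown in the snippet. A plausible implementation, assuming that a failed or unprocessed action leaves a null or a Throwable in its result slot, might look like the following; this is purely an assumption about the original code, not its actual source.

// Hypothetical helper: fail fast if any result slot reports an error.
private void throwIfBatchFailed(Object[] results) throws IOException {
  for (int i = 0; i < results.length; i++) {
    Object result = results[i];
    if (result == null || result instanceof Throwable) {
      throw new IOException("HBase batch action " + i + " failed: " + result);
    }
  }
}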
Example 3: processBatchCallback
import org.apache.hadoop.hbase.client.Row; // import the required package/class
/**
 * Randomly picks a connection and processes the batch of actions for a given table.
 * @param actions the actions
 * @param tableName table name
 * @param results the results array
 * @param callback the batch callback
 * @throws IOException
 */
@SuppressWarnings("deprecation")
public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName,
    Object[] results, Batch.Callback<R> callback) throws IOException {
  // Currently used by RegionStateStore.
  // A deprecated method is used because multiple threads accessing RegionStateStore each do a
  // single put, and HTable is not thread safe. The alternative would be to create an HTable
  // instance for each put, but that is not very efficient.
  // See HBASE-11610 for more details.
  try {
    hConnections[ThreadLocalRandom.current().nextInt(noOfConnections)].processBatchCallback(
        actions, tableName, this.batchPool, results, callback);
  } catch (InterruptedException e) {
    throw new InterruptedIOException(e.getMessage());
  }
}
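A hedged sketch of a call site, loosely modeled on how RegionStateStore submits a single put through this helper; the 'multiHConnection' instance, the row key, family, qualifier, and value variables are all illustrative assumptions.

// Illustrative call only; none of these variables come from the snippet above.
Put put = new Put(regionStateRowKey)
    .addColumn(catalogFamily, stateQualifier, Bytes.toBytes(state.name()));
multiHConnection.processBatchCallback(
    Collections.singletonList(put),    // a single action
    TableName.valueOf("hbase:meta"),   // the catalog table
    new Object[1],                     // one result slot per action
    null);                             // no per-action callback needed here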
Example 4: convertQualifiersToAliases
import org.apache.hadoop.hbase.client.Row; // import the required package/class
Row convertQualifiersToAliases(MTableDescriptor mTableDescriptor, final Row originalRow,
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyQualifierToAliasMap,
    int intForUniqueSignature)
    throws IOException {
  // Append, Delete, Get, Increment, Mutation, Put, RowMutations
  Class<?> originalRowClass = originalRow.getClass();
  if (Append.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Append) originalRow, familyQualifierToAliasMap);
  } else if (Delete.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Delete) originalRow);
  } else if (Get.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Get) originalRow, familyQualifierToAliasMap);
  } else if (Increment.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(
        mTableDescriptor, (Increment) originalRow, familyQualifierToAliasMap);
  } else if (Put.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (Put) originalRow);
  } else if (RowMutations.class.isAssignableFrom(originalRowClass)) {
    return convertQualifiersToAliases(mTableDescriptor, (RowMutations) originalRow);
  }
  return null;
}
Example 5: testBatchProcessing
import org.apache.hadoop.hbase.client.Row; // import the required package/class
private void testBatchProcessing(Table table) throws IOException, InterruptedException {
  List<Row> actions = new LinkedList<>();
  actions.add(new Append(ROW_ID_02)
      .add(CF01, COLQUALIFIER03, Bytes.toBytes("appendedStringViaBatch")));
  actions.add(new Delete(ROW_ID_03).addColumn(CF01, COLQUALIFIER04));
  actions.add(new Increment(ROW_ID_02).addColumn(CF01, COLQUALIFIER05, 14));
  actions.add(new Put(ROW_ID_05)
      .addColumn(CF01, COLQUALIFIER04, TABLE_PUT_WITH_LIST)
      .addColumn(CF02, COLQUALIFIER02, TABLE_PUT_WITH_LIST));
  actions.add(new Get(ROW_ID_01).addColumn(CF01, COLQUALIFIER02));
  Object[] returnedObjects = new Object[actions.size()];
  table.batch(actions, returnedObjects);
  int index = 0;
  for (Object returnedObject : returnedObjects) {
    assertTrue("Table#batch action failed for " + actions.get(index).getClass().getSimpleName(),
        returnedObject != null);
    if (Get.class.isAssignableFrom(actions.get(index).getClass())) {
      Result resultFromGet = (Result) returnedObject;
      assertTrue("Table#batch Get action returned unexpected Result: expected <"
          + Bytes.toString(TABLE_PUT_WITH_LIST) + ">, returned <"
          + Bytes.toString(resultFromGet.getValue(CF01, COLQUALIFIER02)) + ">",
          Bytes.equals(TABLE_PUT_WITH_LIST, resultFromGet.getValue(CF01, COLQUALIFIER02)));
    }
    index++;
  }
}
Example 6: updateProfileCountsForSaleInHBase
import org.apache.hadoop.hbase.client.Row; // import the required package/class
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event) throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(buyerValueIncrement);
  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);
  profileTable.batch(actions);
}
Example 7: logInProfileInHBase
import org.apache.hadoop.hbase.client.Row; // import the required package/class
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] profileRowKey = generateProfileRowKey(userId);
  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);
  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);
  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  actions.add(put);
  profileTable.batch(actions);
}
Example 8: createProfile
import org.apache.hadoop.hbase.client.Row; // import the required package/class
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  byte[] rowKey = generateProfileRowKey(userId);
  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL, Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);
  Increment increment = new Increment(rowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);
  profileTable.batch(actions);
}
Example 9: batch
import org.apache.hadoop.hbase.client.Row; // import the required package/class
/**
 * Apply the changes and handle the connection pool.
 * @param tableName table to insert into
 * @param allRows list of actions
 * @throws IOException
 */
protected void batch(byte[] tableName, Collection<List<Row>> allRows) throws IOException {
  if (allRows.isEmpty()) {
    return;
  }
  HTableInterface table = null;
  try {
    table = this.sharedHtableCon.getTable(tableName);
    for (List<Row> rows : allRows) {
      table.batch(rows);
      this.metrics.appliedOpsRate.inc(rows.size());
    }
  } catch (InterruptedException ix) {
    throw new IOException(ix);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
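A hedged sketch of how a caller (for example, a replication sink applying edits to several tables) might assemble the Collection<List<Row>> argument for the batch method above; the 'replicatedEdits' input and the tableForEdit(...) lookup are hypothetical names introduced only for illustration.

// Illustrative only: group actions per destination table, then flush each group in one batch call.
protected void applyEdits(List<Mutation> replicatedEdits) throws IOException {
  Map<byte[], List<Row>> rowsByTable = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (Mutation edit : replicatedEdits) {
    // Mutation implements Row, so edits can be grouped and batched directly.
    rowsByTable.computeIfAbsent(tableForEdit(edit), k -> new ArrayList<>()).add(edit);
  }
  for (Map.Entry<byte[], List<Row>> entry : rowsByTable.entrySet()) {
    batch(entry.getKey(), Collections.singletonList(entry.getValue()));
  }
}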
Example 10: batch
import org.apache.hadoop.hbase.client.Row; // import the required package/class
@Override
public Object[] batch(List<? extends Row> actions)
    throws IOException, InterruptedException {
  List<Result> results = new ArrayList<Result>();
  for (Row r : actions) {
    if (r instanceof Delete) {
      delete((Delete) r);
      continue;
    }
    if (r instanceof Put) {
      put((Put) r);
      continue;
    }
    if (r instanceof Get) {
      results.add(get((Get) r));
    }
  }
  return results.toArray();
}
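Note that this simplified implementation only collects results for Gets, so the returned array can be shorter than the action list, unlike the real Table#batch contract of one slot per action. A small illustration follows; 'mockTable' refers to an instance of the class above, and CF and Q are assumed column-family and qualifier constants.

// Illustrative only: behavior of the simplified batch above.
List<Row> actions = new ArrayList<>();
actions.add(new Put(Bytes.toBytes("r1")).addColumn(CF, Q, Bytes.toBytes("v1")));
actions.add(new Delete(Bytes.toBytes("r2")));
actions.add(new Get(Bytes.toBytes("r1")).addColumn(CF, Q));
Object[] out = mockTable.batch(actions);
// out.length == 1 here: only the Get contributed a Result, whereas the real
// Table#batch(List, Object[]) fills exactly one result slot per submitted action.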
Example 11: getScanner
import org.apache.hadoop.hbase.client.Row; // import the required package/class
@Override
public ResultScanner getScanner(Scan scan) throws IOException {
  LOG.trace("getScanner(Scan)");
  ReadRowsRequest.Builder request = scanAdapter.adapt(scan);
  metadataSetter.setMetadata(request);
  try {
    com.google.cloud.bigtable.grpc.ResultScanner<com.google.bigtable.v1.Row> scanner =
        client.readRows(request.build());
    return bigtableResultScannerAdapter.adapt(scanner);
  } catch (Throwable throwable) {
    LOG.error("Encountered exception when executing getScanner.", throwable);
    throw new IOException(
        makeGenericExceptionMessage(
            "getScanner",
            options.getProjectId(),
            tableName.getQualifierAsString()),
        throwable);
  }
}
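For completeness, a minimal read loop over the ResultScanner returned by a getScanner call; the 'table' reference and the CF and Q constants are assumptions supplied by the caller, not part of the adapter code above.

// Illustrative helper: scan one column and print each row's value.
private void printColumn(Table table) throws IOException {
  Scan scan = new Scan().addColumn(CF, Q);
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
      System.out.println(Bytes.toString(result.getRow()) + " -> "
          + Bytes.toString(result.getValue(CF, Q)));
    }
  }
}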
Example 12: BatchExecutor
import org.apache.hadoop.hbase.client.Row; // import the required package/class
public BatchExecutor(
    BigtableClient client,
    BigtableOptions options,
    TableMetadataSetter tableMetadataSetter,
    ListeningExecutorService service,
    OperationAdapter<Get, ReadRowsRequest.Builder> getAdapter,
    OperationAdapter<Put, MutateRowRequest.Builder> putAdapter,
    OperationAdapter<Delete, MutateRowRequest.Builder> deleteAdapter,
    RowMutationsAdapter rowMutationsAdapter,
    AppendAdapter appendAdapter,
    IncrementAdapter incrementAdapter,
    ResponseAdapter<com.google.bigtable.v1.Row, Result> rowToResultAdapter) {
  this.client = client;
  this.options = options;
  this.tableMetadataSetter = tableMetadataSetter;
  this.service = service;
  this.getAdapter = getAdapter;
  this.putAdapter = putAdapter;
  this.deleteAdapter = deleteAdapter;
  this.rowMutationsAdapter = rowMutationsAdapter;
  this.appendAdapter = appendAdapter;
  this.incrementAdapter = incrementAdapter;
  this.rowToResultAdapter = rowToResultAdapter;
  rowResultConverter = new RowResultConverter(rowToResultAdapter);
}
Example 13: issueRequest
import org.apache.hadoop.hbase.client.Row; // import the required package/class
ListenableFuture<? extends GeneratedMessage> issueRequest(Row row) {
  if (row instanceof Put) {
    return issuePutRequest((Put) row);
  } else if (row instanceof Delete) {
    return issueDeleteRequest((Delete) row);
  } else if (row instanceof Append) {
    return issueAppendRequest((Append) row);
  } else if (row instanceof Increment) {
    return issueIncrementRequest((Increment) row);
  } else if (row instanceof Get) {
    return issueGetRequest((Get) row);
  } else if (row instanceof RowMutations) {
    return issueRowMutationsRequest((RowMutations) row);
  }
  LOG.error("Encountered unknown action type %s", row.getClass());
  return Futures.immediateFailedFuture(
      new IllegalArgumentException("Encountered unknown action type: " + row.getClass()));
}
Example 14: batchCallback
import org.apache.hadoop.hbase.client.Row; // import the required package/class
/**
 * Implementation of
 * {@link org.apache.hadoop.hbase.client.HTable#batchCallback(List, Batch.Callback)}
 */
public <R> Object[] batchCallback(
    List<? extends Row> actions,
    Batch.Callback<R> callback) throws IOException, InterruptedException {
  LOG.trace("batchCallback(List<>, Batch.Callback)");
  Result[] results = new Result[actions.size()];
  int index = 0;
  List<ListenableFuture<Object>> resultFutures = new ArrayList<>(actions.size());
  for (Row row : actions) {
    resultFutures.add(issueRowRequest(row, callback, results, index++));
  }
  try {
    Futures.allAsList(resultFutures).get();
  } catch (ExecutionException e) {
    LOG.error("Encountered exception in batchCallback(List<>, Batch.Callback). ", e);
    throw new IOException("batchCallback error", e);
  }
  return results;
}
Example 15: batch
import org.apache.hadoop.hbase.client.Row; // import the required package/class
/**
 * Apply the changes and handle the connection pool.
 * @param tableName table to insert into
 * @param allRows list of actions
 * @throws IOException
 */
protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
  if (allRows.isEmpty()) {
    return;
  }
  Table table = null;
  try {
    table = this.sharedHtableCon.getTable(tableName);
    for (List<Row> rows : allRows) {
      table.batch(rows);
    }
  } catch (InterruptedException ix) {
    throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}