This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.size. If you have been wondering what Result.size does, how to use it, or where to find examples of it, the curated snippets below should help. You can also explore the enclosing class, org.apache.hadoop.hbase.client.Result, for further usage examples.
Below are 15 code examples of the Result.size method, sorted by popularity by default.
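First, a minimal sketch of what Result.size() reports: the number of Cells carried by the Result, so a size of 0 means the Get matched nothing. The connection, table name, and row key below are placeholders invented for illustration, not taken from the examples that follow.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Placeholder table and row key; any existing table behaves the same way.
try (Connection conn = ConnectionFactory.createConnection();
    Table table = conn.getTable(TableName.valueOf("demo_table"))) {
  Result res = table.get(new Get(Bytes.toBytes("row1")));
  if (res.size() == 0) {          // equivalent to res.isEmpty()
    System.out.println("row not found");
  } else {
    System.out.println("row has " + res.size() + " cells");
  }
}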
Example 1: deleteAndWait

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void deleteAndWait(byte[] row, Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
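The retry loop above recurs almost verbatim in Examples 12, 13, and 14: poll with a Get, treat res.size() >= 1 as "row still present", and sleep SLEEP_TIME between up to NB_RETRIES attempts. As a hedged sketch of how that pattern could be factored out (the Condition interface and waitFor helper are hypothetical, not part of HBase or these tests; fail() is JUnit's):

// Hypothetical helper illustrating the polling idiom; not part of HBase.
interface Condition {
  boolean met() throws Exception;
}

private static void waitFor(Condition cond, int retries, long sleepMs, String failMsg)
    throws Exception {
  for (int i = 0; i < retries; i++) {
    if (cond.met()) {
      return;
    }
    Thread.sleep(sleepMs);
  }
  fail(failMsg);
}

// Usage mirroring deleteAndWait for a single target table:
// waitFor(() -> target.get(get).size() == 0, NB_RETRIES, SLEEP_TIME,
//     "Waited too much time for del replication");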
Example 2: map

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Implements mapper logic for use across APIs.
 */
protected static Put map(ImmutableBytesWritable key, Result value) throws IOException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  // Get the original value and reverse it
  String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, null));
  StringBuilder newValue = new StringBuilder(originalValue);
  newValue.reverse();
  // Now set the value to be collected
  Put outval = new Put(key.get());
  outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
  return outval;
}
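A portability note: Put.add(family, qualifier, value), used here and in several later examples, comes from the older client API; it was deprecated in HBase 1.0 in favor of Put.addColumn and removed in 2.0. On a newer client the last two lines would read (behavior unchanged):

// HBase 1.0+ clients: addColumn replaces the deprecated add
Put outval = new Put(key.get());
outval.addColumn(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));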
Example 3: parsePermissions

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private static ListMultimap<String, TablePermission> parsePermissions(
    byte[] entryName, Result result) {
  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  if (result != null && result.size() > 0) {
    for (Cell kv : result.rawCells()) {
      Pair<String, TablePermission> permissionsOfUserOnTable =
          parsePermissionRecord(entryName, kv);
      if (permissionsOfUserOnTable != null) {
        String username = permissionsOfUserOnTable.getFirst();
        TablePermission permissions = permissionsOfUserOnTable.getSecond();
        perms.put(username, permissions);
      }
    }
  }
  return perms;
}
Example 4: wait

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void wait(byte[] row, Table target, boolean isDeleted)
    throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for replication. Row:" + Bytes.toString(row)
          + ". IsDeleteReplication:" + isDeleted);
    }
    Result res = target.get(get);
    boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
    if (sleep) {
      LOG.info("Waiting for more time for replication. Row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      Thread.sleep(SLEEP_TIME);
    } else {
      if (!isDeleted) {
        assertArrayEquals(res.value(), row);
      }
      LOG.info("Obtained row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      break;
    }
  }
}
Example 5: checkWithWait

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void checkWithWait(byte[] row, int count, Table table) throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time while getting the row.");
    }
    boolean rowReplicated = false;
    Result res = table.get(get);
    if (res.size() >= 1) {
      LOG.info("Row is replicated");
      rowReplicated = true;
      assertEquals("Table '" + table + "' did not have the expected number of results.",
          count, res.size());
      break;
    }
    if (rowReplicated) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 6: doPutTest

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void doPutTest(byte[] row) throws IOException, InterruptedException {
  Put put = new Put(row);
  put.add(famName, row, row);

  if (htable1 == null) {
    htable1 = utility1.getConnection().getTable(tableName);
  }
  htable1.put(put);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    Result res = htable2.get(get);
    if (res.size() == 0) {
      LOG.info("Row not available");
      Thread.sleep(SLEEP_TIME);
    } else {
      assertArrayEquals(res.value(), row);
      break;
    }
  }
}
Example 7: map

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Pass the key, and reversed value to reduce.
 *
 * @param key The current row key.
 * @param value The columns of the current row.
 * @param context The task context.
 * @throws IOException When reading or writing the row fails.
 */
public void map(ImmutableBytesWritable key, Result value,
    Context context)
    throws IOException, InterruptedException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  // Get the original value and reverse it
  String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, null));
  StringBuilder newValue = new StringBuilder(originalValue);
  newValue.reverse();
  // Now set the value to be collected
  Put outval = new Put(key.get());
  outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue.toString()));
  context.write(key, outval);
}
Example 8: map

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Pass the key and value to reduce.
 *
 * @param key The key, here "aaa", "aab" etc.
 * @param value The value is the same as the key.
 * @param context The task context.
 * @throws IOException When reading the rows fails.
 */
@Override
public void map(ImmutableBytesWritable key, Result value,
    Context context)
    throws IOException, InterruptedException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null));
  LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) +
      ", value -> " + val);
  context.write(key, key);
}
Example 9: map

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Pass the key and value to reduce.
 *
 * @param key The key, here "aaa", "aab" etc.
 * @param value The value is the same as the key.
 * @param context The task context.
 * @throws IOException When reading the rows fails.
 */
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
    throws IOException, InterruptedException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf =
      value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null));
  LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) +
      ", value -> " + val);
  context.write(key, key);
}
Example 10: doAction

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);
    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
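Since ResultScanner implements Closeable, the explicit try/finally above can be written with try-with-resources on any Java 7+ client; a behavior-preserving sketch:

@Override
protected boolean doAction() throws Exception {
  Scan s = new Scan();
  s.setBatch(2);
  s.addFamily(FAMILY);
  s.setFilter(new KeyOnlyFilter());
  s.setMaxVersions(1);
  // The scanner is closed automatically, including on early return or exception.
  try (ResultScanner rs = table.getScanner(s)) {
    Result result = rs.next();
    return result != null && result.size() > 0;
  }
}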
Example 11: getTableRegionForRow

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
/**
 * Return the region and current deployment for the region containing
 * the given row. If the region cannot be found, returns null. If it
 * is found, but not currently deployed, the second element of the pair
 * may be null.
 */
@VisibleForTesting // Used by TestMaster.
Pair<HRegionInfo, ServerName> getTableRegionForRow(
    final TableName tableName, final byte[] rowKey)
    throws IOException {
  final AtomicReference<Pair<HRegionInfo, ServerName>> result =
      new AtomicReference<Pair<HRegionInfo, ServerName>>(null);

  MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
    @Override
    public boolean processRow(Result data) throws IOException {
      if (data == null || data.size() <= 0) {
        return true;
      }
      Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
      if (pair == null) {
        return false;
      }
      if (!pair.getFirst().getTable().equals(tableName)) {
        return false;
      }
      result.set(pair);
      return true;
    }
  };

  MetaScanner.metaScan(clusterConnection, visitor, tableName, rowKey, 1);
  return result.get();
}
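Two details worth noting here: data.size() <= 0 is simply the long way of writing data.isEmpty(), and the visitor's return value steers the scan, since returning false stops MetaScanner early once a row cannot be parsed or belongs to a different table, while the final argument (1) to metaScan limits the scan to a single meta row.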
Example 12: deleteAndWaitWithFamily

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void deleteAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  del.deleteFamily(fam);
  source.delete(del);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 13: putAndWaitWithFamily

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void putAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, val);
  source.put(put);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertEquals(res.size(), 1);
        assertArrayEquals(res.value(), val);
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 14: putAndWait

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
private void putAndWait(byte[] row, byte[] fam, Table source, Table... targets)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, row);
  source.put(put);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertArrayEquals(res.value(), row);
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 15: testReplicationWithCellTags

import org.apache.hadoop.hbase.client.Result; // import the package/class this method depends on
@Test(timeout = 300000)
public void testReplicationWithCellTags() throws Exception {
  LOG.info("testReplicationWithCellTags");
  Put put = new Put(ROW);
  put.setAttribute("visibility", Bytes.toBytes("myTag3"));
  put.add(FAMILY, ROW, ROW);
  htable1 = new HTable(conf1, TABLE_NAME);
  htable1.put(put);

  Get get = new Get(ROW);
  try {
    for (int i = 0; i < NB_RETRIES; i++) {
      if (i == NB_RETRIES - 1) {
        fail("Waited too much time for put replication");
      }
      Result res = htable2.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        Thread.sleep(SLEEP_TIME);
      } else {
        assertArrayEquals(res.value(), ROW);
        assertEquals(1, TestCoprocessorForTagsAtSink.tags.size());
        Tag tag = TestCoprocessorForTagsAtSink.tags.get(0);
        assertEquals(TAG_TYPE, tag.getType());
        break;
      }
    }
  } finally {
    TestCoprocessorForTagsAtSink.tags = null;
  }
}
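Finally, new HTable(conf1, TABLE_NAME) in Example 15 uses a constructor that was deprecated in HBase 1.0 and removed in 2.0; newer clients obtain the table from a Connection, as Example 6 already does. A sketch, assuming TABLE_NAME is a TableName and conf1 a Configuration:

// HBase 1.0+ style: get the Table from a shared Connection instead of new HTable(...)
Connection connection = ConnectionFactory.createConnection(conf1);
Table htable1 = connection.getTable(TABLE_NAME);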