This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Result. If you are wondering what the Result class does or how to use it, the curated examples below may help.
The Result class belongs to the org.apache.hadoop.hbase.client package. 15 code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: verify
import org.apache.hadoop.hbase.client.Result; // import the required package/class
private void verify(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  ResultScanner scanner = table.getScanner(scan);
  for (Result r : scanner) {
    for (Cell kv : r.listCells()) {
      log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv))
          + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv))
          + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv)));
      org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()),
          (Boolean) Bytes.toBoolean(CellUtil.cloneValue(kv)));
    }
  }
  scanner.close();
}
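Note: the scanner above is closed manually, so it leaks if an exception escapes the loop. ResultScanner implements Closeable, so try-with-resources is the safer idiom. A minimal sketch of the same loop under that pattern, assuming the same table and column constants:

private void verifySafely(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  try (ResultScanner scanner = table.getScanner(scan)) { // closed automatically, even on exception
    for (Result r : scanner) {
      for (Cell kv : r.listCells()) {
        log.debug(Bytes.toString(r.getRow()) + "\t" + kv.getTimestamp());
      }
    }
  }
}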
Example 2: map
import org.apache.hadoop.hbase.client.Result; // import the required package/class
@Override
public void map(ImmutableBytesWritable key, Result result, Context context)
    throws IOException {
  List<Long> tsList = new ArrayList<Long>();
  for (Cell kv : result.listCells()) {
    tsList.add(kv.getTimestamp());
  }
  List<Put> puts = new ArrayList<>();
  for (Long ts : tsList) {
    Put put = new Put(key.get());
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true));
    puts.add(put);
  }
  table.put(puts);
}
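Note: Put.add(family, qualifier, ts, value) is deprecated in later HBase 1.x clients in favor of addColumn with the same signature. On a newer client, the loop body can be written as follows (a sketch under that assumption):

Put put = new Put(key.get());
put.setDurability(Durability.SKIP_WAL);
put.addColumn(FAMILY_NAME, COLUMN_NAME, ts, Bytes.toBytes(true)); // replaces the deprecated Put.add
puts.add(put);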
Example 3: QueryByCondition1
import org.apache.hadoop.hbase.client.Result; // import the required package/class
public static void QueryByCondition1(String tableName) {
  HTablePool pool = new HTablePool(configuration, 1000);
  HTable table = (HTable) pool.getTable(tableName);
  try {
    Get scan = new Get("abcdef".getBytes()); // look up by row key
    Result r = table.get(scan);
    System.out.println("Got row key: " + new String(r.getRow()));
    for (KeyValue keyValue : r.raw()) {
      System.out.println("Column: " + new String(keyValue.getFamily())
          + "====Value: " + new String(keyValue.getValue()));
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
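HTablePool is deprecated and was removed in HBase 1.0; the modern client pools resources internally behind Connection. A minimal sketch of the same point lookup against the current API, assuming the same configuration object; Cell and CellUtil replace the deprecated KeyValue accessors:

public static void queryByRowKey(String tableName) throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(configuration);
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    Result r = table.get(new Get(Bytes.toBytes("abcdef"))); // look up by row key
    System.out.println("Got row key: " + Bytes.toString(r.getRow()));
    for (Cell cell : r.listCells()) {
      System.out.println("Column: " + Bytes.toString(CellUtil.cloneFamily(cell))
          + "====Value: " + Bytes.toString(CellUtil.cloneValue(cell)));
    }
  }
}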
Example 4: doPutTest
import org.apache.hadoop.hbase.client.Result; // import the required package/class
private void doPutTest(byte[] row) throws IOException, InterruptedException {
  Put put = new Put(row);
  put.add(famName, row, row);
  if (htable1 == null) {
    htable1 = utility1.getConnection().getTable(tableName);
  }
  htable1.put(put);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    Result res = htable2.get(get);
    if (res.size() == 0) {
      LOG.info("Row not available");
      Thread.sleep(SLEEP_TIME);
    } else {
      assertArrayEquals(res.value(), row);
      break;
    }
  }
}
Example 5: QueryByCondition2
import org.apache.hadoop.hbase.client.Result; // import the required package/class
public static void QueryByCondition2(String tableName) {
  try {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"), null,
        CompareOp.EQUAL, Bytes.toBytes("aaa")); // select rows where column1 equals "aaa"
    Scan s = new Scan();
    s.setFilter(filter);
    ResultScanner rs = table.getScanner(s);
    for (Result r : rs) {
      System.out.println("Got row key: " + new String(r.getRow()));
      for (KeyValue keyValue : r.raw()) {
        System.out.println("Column: " + new String(keyValue.getFamily())
            + "====Value: " + new String(keyValue.getValue()));
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
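One caveat with SingleColumnValueFilter: by default, rows that do not contain column1 at all also pass the filter. If the scan should return only rows that actually carry the column with the expected value, enable filterIfMissing, as in this small variation on the filter above:

SingleColumnValueFilter filter = new SingleColumnValueFilter(
    Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
filter.setFilterIfMissing(true); // rows without column1 are now filtered out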
Example 6: testVisibilityLabelsOnRSRestart
import org.apache.hadoop.hbase.client.Result; // import the required package/class
@Test(timeout = 60 * 1000)
public void testVisibilityLabelsOnRSRestart() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
      .getRegionServerThreads();
  for (RegionServerThread rsThread : regionServerThreads) {
    rsThread.getRegionServer().abort("Aborting ");
  }
  // Start one new RS
  RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
  waitForLabelsRegionAvailability(rs.getRegionServer());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 1);
  }
}
Example 7: verify
import org.apache.hadoop.hbase.client.Result; // import the required package/class
private void verify(Scan scan) throws IOException {
  ResultScanner scanner = htable.getScanner(scan);
  Iterator<Result> it = scanner.iterator();
  /* Then */
  int count = 0;
  try {
    while (it.hasNext()) {
      it.next();
      count++;
    }
  } finally {
    scanner.close();
  }
  assertEquals(expected, count);
}
Example 8: findStartNode
import org.apache.hadoop.hbase.client.Result; // import the required package/class
private static CINode findStartNode(Table table, byte[] startKey) throws IOException {
  Scan scan = new Scan();
  scan.setStartRow(startKey);
  scan.setBatch(1);
  scan.addColumn(FAMILY_NAME, COLUMN_PREV);

  long t1 = System.currentTimeMillis();
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  long t2 = System.currentTimeMillis();
  scanner.close();

  if (result != null) {
    CINode node = getCINode(result, new CINode());
    System.out.printf("FSR %d %s\n", t2 - t1, Bytes.toStringBinary(node.key));
    return node;
  }

  System.out.println("FSR " + (t2 - t1));
  return null;
}
Example 9: rowFilter
import org.apache.hadoop.hbase.client.Result; // import the required package/class
/**
 * Use a row filter to select rows with keys greater than (or equal to) the given row key.
 *
 * @param tableName table name
 * @param rowKey    row key
 * @param count     number of rows to fetch
 */
public void rowFilter(String tableName, String rowKey, int count) {
  HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
  Table table = hBaseConfiguration.table(tableName);
  Scan scan = new Scan();
  // Row filters: pick one of the comparators below
  //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowKey)))); // exact binary comparison
  //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*"))); // regular expression
  //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row"))); // substring match
  scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("row".getBytes()))); // binary prefix
  scan.setCaching(10);
  scan.setBatch(10);
  try {
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(count);
    HBaseResultUtil.print(results);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
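The commented-out comparators can also be combined rather than chosen one at a time: FilterList applies several filters under an AND (MUST_PASS_ALL) or OR (MUST_PASS_ONE) operator. A sketch, assuming the same imports, that keeps only rows at or after rowKey whose key also starts with the prefix "row":

FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL,
    new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL,
        new BinaryComparator(Bytes.toBytes(rowKey))),
    new RowFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("row"))));
scan.setFilter(filters);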
Example 10: getAllRecord
import org.apache.hadoop.hbase.client.Result; // import the required package/class
/**
 * Scans and prints every record in an existing HBase table.
 *
 * @param myHbaseBtableName name of the HBase table to scan
 */
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    // close the scanner
    if (hbaseBSs != null) hbaseBSs.close();
  }
}
Example 11: getServerUserRegions
import org.apache.hadoop.hbase.client.Result; // import the required package/class
/**
 * @param connection connection we're using
 * @param serverName server whose regions we're interested in
 * @return Map of user regions installed on this server (does not include
 *         catalog regions).
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, Result>
    getServerUserRegions(Connection connection, final ServerName serverName)
    throws IOException {
  final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
  // Fill the above hris map with entries from hbase:meta that have the passed
  // servername.
  CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
    @Override
    void add(Result r) {
      if (r == null || r.isEmpty()) return;
      RegionLocations locations = getRegionLocations(r);
      if (locations == null) return;
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc != null && loc.getServerName() != null
            && loc.getServerName().equals(serverName)) {
          hris.put(loc.getRegionInfo(), r);
        }
      }
    }
  };
  fullScan(connection, v);
  return hris;
}
Example 12: testCheckAndDelete
import org.apache.hadoop.hbase.client.Result; // import the required package/class
@Test
public void testCheckAndDelete() throws IOException {
  Get get = new Get(ROW_1);
  Result result = remoteTable.get(get);
  byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNull(value2);
  assertTrue(remoteTable.exists(get));
  assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);

  Delete delete = new Delete(ROW_1);
  remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
  assertFalse(remoteTable.exists(get));

  Put put = new Put(ROW_1);
  put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
  remoteTable.put(put);

  assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put));
  assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2, put));
}
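In HBase 2.x, the positional checkAndDelete and checkAndPut overloads used above are deprecated in favor of a fluent builder on Table. A sketch of the same guarded delete, assuming a client that implements the 2.x Table interface:

boolean deleted = remoteTable.checkAndMutate(ROW_1, COLUMN_1)
    .qualifier(QUALIFIER_1)
    .ifEquals(VALUE_1) // only delete if the cell currently equals VALUE_1
    .thenDelete(new Delete(ROW_1));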
Example 13: testSimpleVisibilityLabels
import org.apache.hadoop.hbase.client.Result; // import the required package/class
@Test
public void testSimpleVisibilityLabels() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "|" + CONFIDENTIAL,
      PRIVATE + "|" + CONFIDENTIAL)) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 2);

    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));

    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}
Example 14: map
import org.apache.hadoop.hbase.client.Result; // import the required package/class
/**
 * Pass the key and value to reduce.
 *
 * @param key The key, here "aaa", "aab" etc.
 * @param value The value is the same as the key.
 * @param context The task context.
 * @throws IOException When reading the rows fails.
 */
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
    throws IOException, InterruptedException {
  if (value.size() != 1) {
    throw new IOException("There should only be one input column");
  }
  Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
  if (!cf.containsKey(INPUT_FAMILY)) {
    throw new IOException("Wrong input columns. Missing: '" +
        Bytes.toString(INPUT_FAMILY) + "'.");
  }
  String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null));
  LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) +
      ", value -> " + val);
  context.write(key, key);
}
Example 15: findAll
import org.apache.hadoop.hbase.client.Result; // import the required package/class
@Override
public List<UserInfo> findAll(String tablename, String family) {
  byte[] cf_info = family.getBytes();
  byte[] age_info = Bytes.toBytes("age");
  byte[] id_info = Bytes.toBytes("id");
  byte[] username_info = Bytes.toBytes("userName");
  return hbaseTemplate.find(tablename, family, new RowMapper<UserInfo>() {
    @Override
    public UserInfo mapRow(Result result, int rowNum) throws Exception {
      UserInfo u = new UserInfo();
      u.setId(Bytes.toString(result.getValue(cf_info, id_info)));
      u.setUserName(Bytes.toString(result.getValue(cf_info, username_info)));
      u.setAge(Bytes.toInt(result.getValue(cf_info, age_info)));
      return u;
    }
  });
}
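A usage sketch for the DAO method above; the userInfoDao instance, the table name "user_info", and the family "info" are hypothetical:

List<UserInfo> users = userInfoDao.findAll("user_info", "info");
for (UserInfo u : users) {
  System.out.println(u.getId() + "\t" + u.getUserName() + "\t" + u.getAge());
}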