This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Result.getValue. If you are wondering what Result.getValue does, how to call it, or where to find working examples, the curated method examples below may help. You can also read further about its enclosing class, org.apache.hadoop.hbase.client.Result.
The following presents 15 code examples of the Result.getValue method, sorted by popularity by default.
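Before the examples, here is a minimal, self-contained sketch of the basic Result.getValue call pattern: fetch a row with a Get, then read one cell by column family and qualifier. The table name "demo_table" and the "cf"/"q" family and qualifier are hypothetical placeholders, and the sketch assumes the Connection/Table client API available in HBase 1.0 and later.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetValueSketch {
  public static void main(String[] args) throws IOException {
    // "demo_table", "cf" and "q" are hypothetical names used for illustration only.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      // getValue returns the value of the newest cell for the given
      // family/qualifier, or null if no such cell exists in the Result.
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      if (value != null) {
        System.out.println(Bytes.toString(value));
      }
    }
  }
}

Note that getValue returns null rather than throwing when the cell is absent, so a null check, as in most of the examples below, is the usual pattern.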
Example 1: getAll
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Fetch all records of one column from a single table.
 *
 * @throws IOException
 */
public static Map<byte[], byte[]> getAll(String TableName, String ColumnFamily, String ColumnName)
    throws IOException {
  // Use a TreeMap with BYTES_COMPARATOR: byte[] keys have identity-based
  // equals/hashCode, so a HashMap could never look them up by value.
  Map<byte[], byte[]> tableContent = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
  Scan s = new Scan();
  s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
  s.setMaxVersions(1);
  s.setCacheBlocks(false);
  ResultScanner rs = hbase_table.getScanner(s);
  for (Result r : rs) {
    byte[] key = r.getRow();
    byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
    tableContent.put(key, value);
  }
  rs.close();
  return tableContent;
}
Example 2: copyTable
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Copy one column of a table into a new table.
 *
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String ColumnFamily, String ColumnName)
    throws IOException {
  if (CreateNewTable(newTableName)) {
    logger.info("Created table " + newTableName + " successfully");
  } else {
    logger.info("Failed to create table " + newTableName);
  }
  Scan s = new Scan();
  s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
  s.setMaxVersions(1);
  s.setCacheBlocks(false);
  // The scan reads from the shared hbase_table handle opened elsewhere;
  // the oldTableName parameter is not used by this method.
  ResultScanner rs = hbase_table.getScanner(s);
  HTableInterface hbase_table_new = conn.getTable(newTableName);
  for (Result r : rs) {
    byte[] key = r.getRow();
    byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
    Put put = new Put(key);
    put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName), value);
    hbase_table_new.put(put);
  }
  rs.close();
  hbase_table_new.close();
}
Example 3: map
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Override
protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
    throws IOException, InterruptedException {
  for (java.util.Map.Entry<byte[], ImmutableBytesWritable> index : indexes.entrySet()) {
    byte[] qualifier = index.getKey();
    ImmutableBytesWritable tableName = index.getValue();
    byte[] value = result.getValue(family, qualifier);
    if (value != null) {
      // original: row 123      attribute:phone 555-1212
      // index:    row 555-1212 INDEX:ROW       123
      Put put = new Put(value);
      put.add(INDEX_COLUMN, INDEX_QUALIFIER, rowKey.get());
      context.write(tableName, put);
    }
  }
}
Example 4: getMetaRow
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
private Result getMetaRow() throws IOException {
  Result currentRow = metaScanner.next();
  boolean foundResult = false;
  while (currentRow != null) {
    LOG.info("Row: <" + Bytes.toStringBinary(currentRow.getRow()) + ">");
    byte[] regionInfoValue = currentRow.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    if (regionInfoValue == null || regionInfoValue.length == 0) {
      currentRow = metaScanner.next();
      continue;
    }
    HRegionInfo region = HRegionInfo.getHRegionInfo(currentRow);
    if (!region.getTable().equals(this.tableName)) {
      currentRow = metaScanner.next();
      continue;
    }
    foundResult = true;
    break;
  }
  return foundResult ? currentRow : null;
}
Example 5: verifyMetaRowsAreUpdated
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Verify that every hbase:meta row is updated.
 */
void verifyMetaRowsAreUpdated(HConnection hConnection)
    throws IOException {
  List<Result> results = MetaTableAccessor.fullScan(hConnection);
  assertTrue(results.size() >= REGION_COUNT);
  for (Result result : results) {
    byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    assertTrue(hriBytes != null && hriBytes.length > 0);
    assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
    byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER);
    if (splitA != null && splitA.length > 0) {
      assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
    }
    byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER);
    if (splitB != null && splitB.length > 0) {
      assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
    }
  }
}
Example 6: verifyRowFromMap
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
protected static void verifyRowFromMap(ImmutableBytesWritable key, Result result)
    throws IOException {
  byte[] row = key.get();
  CellScanner scanner = result.cellScanner();
  while (scanner.advance()) {
    Cell cell = scanner.current();
    // assert that all Cells in the Result have the same key
    Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length,
        cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
  }
  for (int j = 0; j < FAMILIES.length; j++) {
    byte[] actual = result.getValue(FAMILIES[j], null);
    Assert.assertArrayEquals("Row in snapshot does not match, expected: " + Bytes.toString(row)
        + ", actual: " + Bytes.toString(actual), row, actual);
  }
}
Example 7: doAnAction
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
public void doAnAction() throws Exception {
  Get g = new Get(targetRow);
  Result res = table.get(g);
  byte[] gotValue = null;
  if (res.getRow() == null) {
    // Trying to verify but we didn't find the row - the writing
    // thread probably just hasn't started writing yet, so we can
    // ignore this action
    return;
  }
  for (byte[] family : targetFamilies) {
    for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
      byte[] qualifier = Bytes.toBytes("col" + i);
      byte[] thisValue = res.getValue(family, qualifier);
      if (gotValue != null && !Bytes.equals(gotValue, thisValue)) {
        gotFailure(gotValue, res);
      }
      numVerified++;
      gotValue = thisValue;
    }
  }
  numRead.getAndIncrement();
}
Example 8: doGets
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
private void doGets(Region region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    final byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      final String qualStr = String.valueOf(j);
      if (VERBOSE) {
        System.err.println("Reading row " + i + ", column " + j + " "
            + Bytes.toString(rowKey) + "/" + qualStr);
      }
      final byte[] qualBytes = Bytes.toBytes(qualStr);
      Get get = new Get(rowKey);
      get.addColumn(CF_BYTES, qualBytes);
      Result result = region.get(get);
      assertEquals(1, result.size());
      byte[] value = result.getValue(CF_BYTES, qualBytes);
      assertTrue(LoadTestKVGenerator.verify(value, rowKey, qualBytes));
    }
  }
}
Example 9: testCheckAndDelete
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
@Test
public void testCheckAndDelete() throws IOException {
  Get get = new Get(ROW_1);
  Result result = remoteTable.get(get);
  byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNull(value2);
  assertTrue(remoteTable.exists(get));
  assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);
  Delete delete = new Delete(ROW_1);
  remoteTable.checkAndDelete(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, delete);
  assertFalse(remoteTable.exists(get));
  Put put = new Put(ROW_1);
  put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
  remoteTable.put(put);
  assertTrue(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put));
  assertFalse(remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_2, put));
}
Example 10: dumpExtraInfoOnRefs
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * Dump out extra info around references if there are any. Helps debugging.
 * @return StringBuilder filled with references if any.
 * @throws IOException
 */
private StringBuilder dumpExtraInfoOnRefs(final BytesWritable key, final Context context,
    final List<byte[]> refs)
    throws IOException {
  StringBuilder refsSb = null;
  if (refs.isEmpty()) return refsSb;
  refsSb = new StringBuilder();
  String comma = "";
  // If a row is a reference but has no definition, print the content of the row that has
  // this row as a 'prev'; it will help debug. The missing row was written just before
  // the row we are dumping out here.
  TableName tn = getTableName(context.getConfiguration());
  try (Table t = this.connection.getTable(tn)) {
    for (byte[] ref : refs) {
      Result r = t.get(new Get(ref));
      List<Cell> cells = r.listCells();
      String ts = (cells != null && !cells.isEmpty())
          ? new java.util.Date(cells.get(0).getTimestamp()).toString() : "";
      byte[] b = r.getValue(FAMILY_NAME, COLUMN_CLIENT);
      String jobStr = (b != null && b.length > 0) ? Bytes.toString(b) : "";
      b = r.getValue(FAMILY_NAME, COLUMN_COUNT);
      long count = (b != null && b.length > 0) ? Bytes.toLong(b) : -1;
      b = r.getValue(FAMILY_NAME, COLUMN_PREV);
      String refRegionLocation = "";
      String keyRegionLocation = "";
      if (b != null && b.length > 0) {
        try (RegionLocator rl = this.connection.getRegionLocator(tn)) {
          HRegionLocation hrl = rl.getRegionLocation(b);
          if (hrl != null) refRegionLocation = hrl.toString();
          // Key here probably has trailing zeros on it.
          hrl = rl.getRegionLocation(key.getBytes());
          if (hrl != null) keyRegionLocation = hrl.toString();
        }
      }
      // Guard the comparison: b is null when the 'prev' column is missing.
      LOG.error("Extras on ref without a def, ref=" + Bytes.toStringBinary(ref) +
          ", refPrevEqualsKey=" +
          (b != null &&
              Bytes.compareTo(key.getBytes(), 0, key.getLength(), b, 0, b.length) == 0) +
          ", key=" + Bytes.toStringBinary(key.getBytes(), 0, key.getLength()) +
          ", ref row date=" + ts + ", jobStr=" + jobStr +
          ", ref row count=" + count +
          ", ref row regionLocation=" + refRegionLocation +
          ", key row regionLocation=" + keyRegionLocation);
      refsSb.append(comma);
      comma = ",";
      refsSb.append(Bytes.toStringBinary(ref));
    }
  }
  return refsSb;
}
Example 11: parseNamespaceResult
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
protected static void parseNamespaceResult(final String namespace, final Result result,
    final NamespaceQuotasVisitor visitor) throws IOException {
  byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
  if (data != null) {
    Quotas quotas = quotasFromData(data);
    visitor.visitNamespaceQuotas(namespace, quotas);
  }
}
Example 12: parseTableResult
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
protected static void parseTableResult(final TableName table, final Result result,
    final TableQuotasVisitor visitor) throws IOException {
  byte[] data = result.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS);
  if (data != null) {
    Quotas quotas = quotasFromData(data);
    visitor.visitTableQuotas(table, quotas);
  }
}
Example 13: isMetaTableUpdated
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * @param hConnection connection to be used
 * @return True if the meta table has been migrated.
 * @throws IOException
 */
static boolean isMetaTableUpdated(final HConnection hConnection) throws IOException {
  List<Result> results = MetaTableAccessor.fullScanOfMeta(hConnection);
  if (results == null || results.isEmpty()) {
    LOG.info("hbase:meta doesn't have any entries to update.");
    return true;
  }
  for (Result r : results) {
    byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    if (!isMigrated(value)) {
      return false;
    }
  }
  return true;
}
Example 14: cleanParent
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
/**
 * If the daughters no longer hold a reference to the parent, delete the parent.
 * @param parent HRegionInfo of the split, offlined parent
 * @param rowContent Content of the <code>parent</code> row in
 * <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from the meta table and from
 * the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
    throws IOException {
  boolean result = false;
  // Check whether it is a merged region with uncleaned references.
  // There is no need to check MERGEB_QUALIFIER because the two qualifiers are
  // inserted/deleted together.
  if (rowContent.getValue(HConstants.CATALOG_FAMILY,
      HConstants.MERGEA_QUALIFIER) != null) {
    // Wait for the merged region to be cleaned first.
    return result;
  }
  // Run checks on each daughter split.
  PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
  Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
  Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
        " because daughter splits no longer hold references");
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaTableAccessor.deleteRegion(this.connection, parent);
    result = true;
  }
  return result;
}
Example 15: getDataFromHbaseRest
import org.apache.hadoop.hbase.client.Result; // import the package/class the method depends on
public static void getDataFromHbaseRest() {
  Cluster hbaseCluster = new Cluster(); // create a cluster object
  hbaseCluster.add("172.28.182.45", 8080); // pass the REST server IP and port
  // Create a REST client instance and get the connection
  Client restClient = new Client(hbaseCluster); // pass the cluster object to the client
  table = new RemoteHTable(restClient, "mywebproject:myclickstream"); // makes a remote call
  Get get = new Get(Bytes.toBytes("row02")); // the row in question
  Result result1 = null;
  try {
    result1 = table.get(get);
    // Read the column family:qualifier values
    byte[] valueWeb = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    byte[] valueWeb01 = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    byte[] valueWebData = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col01"));
    byte[] valueWebData01 = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col02"));
    String valueStr = Bytes.toString(valueWeb);
    String valueStr1 = Bytes.toString(valueWeb01);
    String valueWebdataStr = Bytes.toString(valueWebData);
    String valueWebdataStr1 = Bytes.toString(valueWebData01);
    System.out.println("GET: \n" + " web: " + valueStr + "\n web: " + valueStr1
        + "\n " + "Webdata: " + valueWebdataStr + "\n Webdata: " + valueWebdataStr1);
  } catch (IOException e1) {
    e1.printStackTrace();
  } finally {
    // Drop the Result reference once it is no longer needed.
    result1 = null;
  }
  ResultScanner rsScanner = null;
  try {
    Scan s = new Scan();
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    rsScanner = table.getScanner(s);
    for (Result rr = rsScanner.next(); rr != null; rr = rsScanner.next()) {
      System.out.println("Found row : " + rr);
    }
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    // Make sure you close your scanners when you are done!
    if (rsScanner != null) {
      rsScanner.close();
    }
  }
}