This article collects typical usages of the Java method org.apache.hadoop.hbase.client.Result.isEmpty. If you are wondering what Result.isEmpty does, how to use it, and where to find examples, the curated code examples below may help. You can also read more about the enclosing class, org.apache.hadoop.hbase.client.Result.
The following shows 15 code examples of Result.isEmpty, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
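Before the examples, a minimal sketch of the basic pattern may help: issue a Get, then use Result.isEmpty() to tell a missing row apart from a stored one. This is a hedged illustration; the table name, row key, and column family are hypothetical placeholders, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultIsEmptyDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical table and row names, for illustration only.
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("demo_table"))) {
            Get get = new Get(Bytes.toBytes("row-1"));
            Result result = table.get(get);
            // Table.get() never returns null, but the Result is empty when the
            // row does not exist; isEmpty() is the canonical existence check.
            if (result.isEmpty()) {
                System.out.println("row not found");
            } else {
                System.out.println("row has " + result.rawCells().length + " cells");
            }
        }
    }
}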
Example 1: getPermissions
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
/**
* Reads user permission assignments stored in the <code>l:</code> column
* family of the first table row in <code>_acl_</code>.
*
* <p>
* See {@link AccessControlLists class documentation} for the key structure
* used for storage.
* </p>
*/
static ListMultimap<String, TablePermission> getPermissions(Configuration conf,
byte[] entryName) throws IOException {
if (entryName == null) entryName = ACL_GLOBAL_NAME;
// for normal user tables, we just read the table row from _acl_
ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
// TODO: Pass in a Connection rather than create one each time.
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
Get get = new Get(entryName);
get.addFamily(ACL_LIST_FAMILY);
Result row = table.get(get);
if (!row.isEmpty()) {
perms = parsePermissions(entryName, row);
} else {
LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
+ Bytes.toString(entryName));
}
}
}
return perms;
}
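A hedged usage sketch for Example 1 (the helper below is hypothetical, not part of AccessControlLists; it assumes the surrounding class's imports plus java.util.Map): the returned ListMultimap groups TablePermission entries by user name and is empty when the ACL row was empty.

// Hypothetical caller, for illustration only.
static void dumpPermissions(Configuration conf, byte[] entryName) throws IOException {
    ListMultimap<String, TablePermission> perms = getPermissions(conf, entryName);
    for (Map.Entry<String, TablePermission> entry : perms.entries()) {
        System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
}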
Example 2: verifyNumericRows
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
final boolean present) throws IOException {
for (int i = startRow; i < endRow; i++) {
String failMsg = "Failed verification of row :" + i;
byte[] data = Bytes.toBytes(String.valueOf(i));
Result result = region.get(new Get(data));
boolean hasResult = result != null && !result.isEmpty();
assertEquals(failMsg + result, present, hasResult);
if (!present) continue;
assertTrue(failMsg, result.containsColumn(f, null));
assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
Cell cell = result.getColumnLatestCell(f, null);
assertTrue(failMsg,
Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength()));
}
}
Example 3: resultToString
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private String resultToString(Result result) {
StringBuilder sb = new StringBuilder();
sb.append("cells=");
if(result.isEmpty()) {
sb.append("NONE");
return sb.toString();
}
sb.append("{");
boolean moreThanOne = false;
for(Cell cell : result.listCells()) {
if(moreThanOne) {
sb.append(", ");
} else {
moreThanOne = true;
}
sb.append(CellUtil.toString(cell, true));
}
sb.append("}");
return sb.toString();
}
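A hedged usage sketch for Example 3 (the scan loop is hypothetical, not from the original source): a helper like resultToString is typically called when tracing scanner output, after skipping empty results.

// Hypothetical tracing helper, for illustration only.
private void logScan(Table table) throws IOException {
    try (ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result r : scanner) {
            if (r == null || r.isEmpty()) continue; // nothing to format
            System.out.println("row=" + Bytes.toString(r.getRow()) + " " + resultToString(r));
        }
    }
}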
Example 4: getServerUserRegions
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
/**
* @param connection connection we're using
* @param serverName server whose regions we're interested in
* @return List of user regions installed on this server (does not include
* catalog regions).
* @throws IOException
*/
public static NavigableMap<HRegionInfo, Result>
getServerUserRegions(Connection connection, final ServerName serverName)
throws IOException {
final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
// Fill the above hris map with entries from hbase:meta that have the passed
// servername.
CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
@Override
void add(Result r) {
if (r == null || r.isEmpty()) return;
RegionLocations locations = getRegionLocations(r);
if (locations == null) return;
for (HRegionLocation loc : locations.getRegionLocations()) {
if (loc != null) {
if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
hris.put(loc.getRegionInfo(), r);
}
}
}
}
};
fullScan(connection, v);
return hris;
}
Example 5: fullScanMetaAndPrint
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
public static void fullScanMetaAndPrint(Connection connection)
throws IOException {
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
RegionLocations locations = getRegionLocations(r);
if (locations == null) return true;
for (HRegionLocation loc : locations.getRegionLocations()) {
if (loc != null) {
LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
}
}
return true;
}
};
fullScan(connection, v);
}
Example 6: fullScan
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
/**
* Performs a full scan of a catalog table.
* @param connection connection we're using
* @param visitor Visitor invoked against each row.
* @param startrow Where to start the scan. Pass null to begin the scan at the
* first row of the catalog table, <code>hbase:meta</code>.
* @throws IOException
*/
public static void fullScan(Connection connection,
final Visitor visitor, final byte [] startrow)
throws IOException {
Scan scan = new Scan();
if (startrow != null) scan.setStartRow(startrow);
if (startrow == null) {
int caching = connection.getConfiguration()
.getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
scan.setCaching(caching);
}
scan.addFamily(HConstants.CATALOG_FAMILY);
Table metaTable = getMetaHTable(connection);
ResultScanner scanner = null;
try {
scanner = metaTable.getScanner(scan);
Result data;
while((data = scanner.next()) != null) {
if (data.isEmpty()) continue;
// Break if visit returns false.
if (!visitor.visit(data)) break;
}
} finally {
if (scanner != null) scanner.close();
metaTable.close();
}
}
Example 7: mapRow
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
@Override
public BaseDataPoint mapRow(Result result, int rowNum) throws Exception {
if (result.isEmpty()) {
// return Collections.emptyList();
return null;
}
final byte[] distributedRowKey = result.getRow();
// List<BaseDataPoint> dataPoints = new ArrayList<>();
EasyHBaseBo bo = new EasyHBaseBo();
for (Cell cell : result.rawCells()) {
if (CellUtil.matchingFamily(cell, HBaseTables.EASYHBASE_CF)) {
bo.setRowkey(Bytes.toString(CellUtil.cloneRow(cell)));
bo.setValue(Bytes.toString(CellUtil.cloneValue(cell)));
bo.setTimestamp(cell.getTimestamp());
// dataPoints.add(bo);
// List<T> candidates = new ArrayList<>();
// for (T candidate : candidates) {
// candidate.setRowkey(candidate.getRowkey());
// candidate.setValue(candidate.getValue());
// candidate.setTimestamp(candidate.getTimestamp());
// dataPoints.add(candidate);
// }
}
}
// Reverse sort as timestamp is stored in a reversed order.
// Collections.sort(dataPoints, REVERSE_TIMESTAMP_COMPARATOR);
return bo;
}
Example 8: visit
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
// Check info:regioninfo, info:splitA, and info:splitB. Make sure all
// have migrated HRegionInfos.
byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
// Presumes that an edit updating all three cells either succeeds or
// doesn't -- that we don't have case of info:regioninfo migrated but not
// info:splitA.
if (isMigrated(hriBytes)) return true;
// OK. Need to migrate this row in meta.
// This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
// writable serialization
HRegionInfo hri = parseFrom(hriBytes);
// Now make a put to write back to meta.
Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
// Now migrate info:splitA and info:splitB if they are not null
migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
MetaTableAccessor.putToMetaTable(this.services.getConnection(), p);
if (LOG.isDebugEnabled()) {
LOG.debug("Migrated " + Bytes.toString(p.getRow()));
}
numMigratedRows++;
return true;
}
Example 9: get
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private NamespaceDescriptor get(Table table, String name) throws IOException {
Result res = table.get(new Get(Bytes.toBytes(name)));
if (res.isEmpty()) {
return null;
}
byte[] val = CellUtil.cloneValue(res.getColumnLatestCell(
HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
return
ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(val));
}
Example 10: verifyResultsAndUpdateMetricsOnAPerGetBasis
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private void verifyResultsAndUpdateMetricsOnAPerGetBasis(boolean verify, Get get,
Result result, Table table, boolean isNullExpected) throws IOException {
if (!result.isEmpty()) {
if (verify) {
numKeysVerified.incrementAndGet();
}
} else {
HRegionLocation hloc = connection.getRegionLocation(tableName,
get.getRow(), false);
String rowKey = Bytes.toString(get.getRow());
LOG.info("Key = " + rowKey + ", Region location: " + hloc);
if (isNullExpected) {
nullResult.incrementAndGet();
LOG.debug("Null result obtained for the key = " + rowKey);
return;
}
}
boolean isOk = verifyResultAgainstDataGenerator(result, verify, false);
long numErrorsAfterThis = 0;
if (isOk) {
long cols = 0;
// Count the columns for reporting purposes.
for (byte[] cf : result.getMap().keySet()) {
cols += result.getFamilyMap(cf).size();
}
numCols.addAndGet(cols);
} else {
if (writer != null) {
LOG.error("At the time of failure, writer wrote " + writer.numKeys.get() + " keys");
}
numErrorsAfterThis = numReadErrors.incrementAndGet();
}
if (numErrorsAfterThis > maxErrors) {
LOG.error("Aborting readers -- found more than " + maxErrors + " errors");
aborted = true;
}
}
Example 11: assertGet
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private void assertGet(Region region, int value, boolean expect) throws IOException {
byte[] row = Bytes.toBytes(String.valueOf(value));
Get get = new Get(row);
Result result = region.get(get);
if (expect) {
Assert.assertArrayEquals(row, result.getValue(f, null));
} else {
Assert.assertTrue(result.isEmpty());
}
}
Example 12: assertGetRpc
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private void assertGetRpc(HRegionInfo info, int value, boolean expect)
throws IOException, ServiceException {
byte[] row = Bytes.toBytes(String.valueOf(value));
Get get = new Get(row);
ClientProtos.GetRequest getReq = RequestConverter.buildGetRequest(info.getRegionName(), get);
ClientProtos.GetResponse getResp = getRS().getRSRpcServices().get(null, getReq);
Result result = ProtobufUtil.toResult(getResp.getResult());
if (expect) {
Assert.assertArrayEquals(row, result.getValue(f, null));
} else {
Assert.assertTrue(result.isEmpty());
}
}
Example 13: getQuotas
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
final byte[] qualifier) throws IOException {
Get get = new Get(rowKey);
get.addColumn(QUOTA_FAMILY_INFO, qualifier);
Result result = doGet(connection, get);
if (result.isEmpty()) {
return null;
}
return quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier));
}
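A hedged caller sketch for Example 13 (the method name is hypothetical): because getQuotas maps an empty Result to null, callers must null-check before reading the protobuf message.

// Hypothetical caller, for illustration only.
private static boolean isThrottleDefined(final Connection connection, final byte[] rowKey,
        final byte[] qualifier) throws IOException {
    Quotas quotas = getQuotas(connection, rowKey, qualifier);
    // A null return means the Result was empty, i.e. no quota row exists.
    return quotas != null && quotas.hasThrottle();
}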
Example 14: RowResultGenerator
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
public RowResultGenerator(final String tableName, final RowSpec rowspec,
final Filter filter, final boolean cacheBlocks)
throws IllegalArgumentException, IOException {
Table table = RESTServlet.getInstance().getTable(tableName);
try {
Get get = new Get(rowspec.getRow());
if (rowspec.hasColumns()) {
for (byte[] col: rowspec.getColumns()) {
byte[][] split = KeyValue.parseColumn(col);
if (split.length == 1) {
get.addFamily(split[0]);
} else if (split.length == 2) {
get.addColumn(split[0], split[1]);
} else {
throw new IllegalArgumentException("Invalid column specifier.");
}
}
}
get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
get.setMaxVersions(rowspec.getMaxVersions());
if (filter != null) {
get.setFilter(filter);
}
get.setCacheBlocks(cacheBlocks);
Result result = table.get(get);
if (result != null && !result.isEmpty()) {
valuesI = result.listCells().iterator();
}
} catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
// Warn here because Stargate will return 404 in the case where multiple
// column families were specified but one did not exist -- currently
// HBase will fail the whole Get.
// Specifying multiple columns in a URI should be uncommon usage, but
// leaving a record of what happened in the log helps avoid confusion.
LOG.warn(StringUtils.stringifyException(e));
} finally {
table.close();
}
}
Example 15: getDataFromHbaseRest
import org.apache.hadoop.hbase.client.Result; // import the dependent package/class for this method
public static void getDataFromHbaseRest() {
Cluster hbaseCluster = new Cluster(); // create a cluster object
hbaseCluster.add("172.28.182.45", 8080); // add the REST server's IP and port
// Create a REST client instance and get the connection
Client restClient = new Client(hbaseCluster); // pass the cluster object to the client
table = new RemoteHTable(restClient, "mywebproject:myclickstream"); // 'table' is a class-level RemoteHTable field; calls go over REST
Get get = new Get(Bytes.toBytes("row02")); // gets the row in question
Result result1 = null; // initialized to null; assigned inside the try block
try {
result1 = table.get(get); // fetch the row through the REST gateway
byte[] valueWeb = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col01"));
byte[] valueWeb01 = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col02"));
/*
* getting the column family:column qualifier values
*/
byte[] valueWebData = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col01"));
byte[] valueWebData01 = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col02"));
/*
* getting the column family:column qualifier values
*/
String valueStr = Bytes.toString(valueWeb);
String valueStr1 = Bytes.toString(valueWeb01);
String valueWebdataStr = Bytes.toString(valueWebData);
String valueWebdataStr1 = Bytes.toString(valueWebData01);
System.out.println("GET: \n" + " web: " + valueStr + "\n web: " + valueStr1+"\n "+"Webdata: "+valueWebdataStr);
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
} finally {
/*
 * Null the Result reference before exiting the method. If the object is
 * still needed, keep it, but null the reference once it is no longer
 * used; that is good programming practice.
 */
if (result1 != null && !result1.isEmpty()) {
    result1 = null;
}
}
ResultScanner rsScanner = null;
try {
Scan s = new Scan();
s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col01"));
s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col02"));
rsScanner = table.getScanner(s);
for (Result rr = rsScanner.next(); rr != null; rr = rsScanner.next()) {
System.out.println("Found row : " + rr);
}
} catch (Exception e) {
e.printStackTrace();
} finally {
// Make sure you close your scanners when you are done!
rsScanner.close();
}
}