当前位置: 首页>>代码示例>>Java>>正文


Java Result.isEmpty方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.Result.isEmpty方法的典型用法代码示例。如果您正苦于以下问题:Java Result.isEmpty方法的具体用法?Java Result.isEmpty怎么用?Java Result.isEmpty使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.client.Result的用法示例。


在下文中一共展示了Result.isEmpty方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getPermissions

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Reads the user permission assignments stored in the <code>l:</code> column
 * family of the matching entry row in <code>_acl_</code>.
 *
 * <p>
 * See {@link AccessControlLists class documentation} for the key structure
 * used for storage.
 * </p>
 */
static ListMultimap<String, TablePermission> getPermissions(Configuration conf,
    byte[] entryName) throws IOException {
  // A null entry means the global ACL entry.
  byte[] aclKey = (entryName == null) ? ACL_GLOBAL_NAME : entryName;

  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(ACL_TABLE_NAME)) {
    Get get = new Get(aclKey);
    get.addFamily(ACL_LIST_FAMILY);
    Result row = table.get(get);
    if (row.isEmpty()) {
      LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
          + Bytes.toString(aclKey));
    } else {
      perms = parsePermissions(aclKey, row);
    }
  }

  return perms;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:33,代码来源:AccessControlLists.java

示例2: verifyNumericRows

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Asserts that rows [startRow, endRow) in the region are present (or absent,
 * per {@code present}), each carrying exactly one cell in family {@code f}
 * whose value equals the stringified row key.
 */
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int row = startRow; row < endRow; row++) {
    String failMsg = "Failed verification of row :" + row;
    byte[] rowKey = Bytes.toBytes(String.valueOf(row));
    Result result = region.get(new Get(rowKey));

    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) {
      continue;
    }

    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg, Bytes.equals(rowKey, 0, rowKey.length, cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength()));
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:20,代码来源:HBaseTestingUtility.java

示例3: resultToString

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Renders a Result for logging as {@code cells={cell, cell, ...}}, or
 * {@code cells=NONE} when the result is empty.
 */
private String resultToString(Result result) {
  StringBuilder sb = new StringBuilder("cells=");
  if (result.isEmpty()) {
    return sb.append("NONE").toString();
  }
  sb.append("{");
  // Separator starts empty so the first cell gets no leading ", ".
  String separator = "";
  for (Cell cell : result.listCells()) {
    sb.append(separator).append(CellUtil.toString(cell, true));
    separator = ", ";
  }
  return sb.append("}").toString();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:21,代码来源:MultiThreadedAction.java

示例4: getServerUserRegions

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * @param connection connection we're using
 * @param serverName server whose regions we're interested in
 * @return List of user regions installed on this server (does not include
 * catalog regions).
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, Result>
getServerUserRegions(Connection connection, final ServerName serverName)
  throws IOException {
  final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
  // Visit every hbase:meta row and keep those hosted on the given server.
  CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
    @Override
    void add(Result r) {
      if (r == null || r.isEmpty()) {
        return;
      }
      RegionLocations locations = getRegionLocations(r);
      if (locations == null) {
        return;
      }
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc == null) {
          continue;
        }
        ServerName host = loc.getServerName();
        if (host != null && host.equals(serverName)) {
          hris.put(loc.getRegionInfo(), r);
        }
      }
    }
  };
  fullScan(connection, v);
  return hris;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:32,代码来源:MetaTableAccessor.java

示例5: fullScanMetaAndPrint

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Scans the whole of hbase:meta, logging each non-empty row and every region
 * location found in it.
 */
public static void fullScanMetaAndPrint(Connection connection)
  throws IOException {
  Visitor printer = new Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) {
        return true;
      }
      LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
      RegionLocations locations = getRegionLocations(r);
      if (locations != null) {
        for (HRegionLocation loc : locations.getRegionLocations()) {
          if (loc != null) {
            LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
          }
        }
      }
      // Always keep scanning.
      return true;
    }
  };
  fullScan(connection, printer);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:20,代码来源:MetaTableAccessor.java

示例6: fullScan

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Performs a full scan of a catalog table.
 * @param connection connection we're using
 * @param visitor Visitor invoked against each row.
 * @param startrow Where to start the scan. Pass null if want to begin scan
 * at first row.
 * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
 * @throws IOException
 */
public static void fullScan(Connection connection,
  final Visitor visitor, final byte [] startrow)
throws IOException {
  Scan scan = new Scan();
  if (startrow != null) scan.setStartRow(startrow);
  if (startrow == null) {
    // Whole-table scan: raise caching above the default to cut round trips.
    int caching = connection.getConfiguration()
        .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
    scan.setCaching(caching);
  }
  scan.addFamily(HConstants.CATALOG_FAMILY);
  // try-with-resources closes scanner then table (reverse declaration order),
  // matching the old finally block, and also closes the table if getScanner
  // itself throws -- which the manual version already handled, but this is
  // the idiom the rest of the file uses (see getPermissions).
  try (Table metaTable = getMetaHTable(connection);
       ResultScanner scanner = metaTable.getScanner(scan)) {
    Result data;
    while ((data = scanner.next()) != null) {
      if (data.isEmpty()) continue;
      // Break if visit returns false.
      if (!visitor.visit(data)) break;
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:36,代码来源:MetaTableAccessor.java

示例7: mapRow

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Maps one HBase row to an EasyHBaseBo. Returns null for an empty Result.
 * Only cells in the EASYHBASE column family are considered; the bo ends up
 * holding the values of the last matching cell (same as before -- each
 * matching cell overwrites the previous setRowkey/setValue/setTimestamp).
 */
@Override
public BaseDataPoint mapRow(Result result, int rowNum) throws Exception {
  if (result.isEmpty()) {
    return null;
  }
  EasyHBaseBo bo = new EasyHBaseBo();
  for (Cell cell : result.rawCells()) {
    if (CellUtil.matchingFamily(cell, HBaseTables.EASYHBASE_CF)) {
      // CellUtil.cloneRow/cloneValue replace the deprecated
      // Cell.getRow()/Cell.getValue() accessors; same byte[] copies.
      bo.setRowkey(Bytes.toString(CellUtil.cloneRow(cell)));
      bo.setValue(Bytes.toString(CellUtil.cloneValue(cell)));
      bo.setTimestamp(cell.getTimestamp());
    }
  }
  return bo;
}
 
开发者ID:fchenxi,项目名称:easyhbase,代码行数:30,代码来源:EasyHBaseMapperV2.java

示例8: visit

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Migrates a single hbase:meta row from the 0.92/0.94 Writable serialization
 * to the 0.96+ protobuf form and writes the converted row back to meta.
 * Always returns true so the enclosing scan continues to the next row.
 */
@Override
public boolean visit(Result r) throws IOException {
  if (r ==  null || r.isEmpty()) return true;
  // Check info:regioninfo, info:splitA, and info:splitB.  Make sure all
  // have migrated HRegionInfos.
  byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
  // Presumes that an edit updating all three cells either succeeds or
  // doesn't -- that we don't have case of info:regioninfo migrated but not
  // info:splitA.
  if (isMigrated(hriBytes)) return true;
  // OK. Need to migrate this row in meta.

  // This will 'migrate' the HRI from 092.x and 0.94.x to 0.96+ by reading the
  // writable serialization
  HRegionInfo hri = parseFrom(hriBytes);

  // Now make a put to write back to meta.
  Put p =  MetaTableAccessor.makePutFromRegionInfo(hri);

  // Now migrate info:splitA and info:splitB if they are not null
  migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
  migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);

  MetaTableAccessor.putToMetaTable(this.services.getConnection(), p);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Migrated " + Bytes.toString(p.getRow()));
  }
  numMigratedRows++;
  return true;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:31,代码来源:MetaMigrationConvertingToPB.java

示例9: get

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Fetches the NamespaceDescriptor stored under {@code name}, or null when no
 * such namespace row exists.
 */
private NamespaceDescriptor get(Table table, String name) throws IOException {
  Get get = new Get(Bytes.toBytes(name));
  Result res = table.get(get);
  if (res.isEmpty()) {
    return null;
  }
  // The descriptor protobuf lives in the info:d cell of the namespace row.
  byte[] val = CellUtil.cloneValue(res.getColumnLatestCell(
      HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
  return ProtobufUtil.toNamespaceDescriptor(
      HBaseProtos.NamespaceDescriptor.parseFrom(val));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:12,代码来源:TableNamespaceManager.java

示例10: verifyResultsAndUpdateMetricsOnAPerGetBasis

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Checks a single Get's Result and updates the reader metrics: counts
 * verified keys on a hit, logs and tallies expected nulls on a miss, and
 * flips the shared {@code aborted} flag once the error budget is exceeded.
 */
private void verifyResultsAndUpdateMetricsOnAPerGetBasis(boolean verify, Get get,
    Result result, Table table, boolean isNullExpected) throws IOException {
  if (!result.isEmpty()) {
    if (verify) {
      numKeysVerified.incrementAndGet();
    }
  } else {
    // Empty result: log which region should have held the key, to aid triage.
    HRegionLocation hloc = connection.getRegionLocation(tableName,
      get.getRow(), false);
    String rowKey = Bytes.toString(get.getRow());
    LOG.info("Key = " + rowKey + ", Region location: " + hloc);
    if(isNullExpected) {
      nullResult.incrementAndGet();
      LOG.debug("Null result obtained for the key ="+rowKey);
      // Expected miss: skip the data-generator verification below.
      return;
    }
  }
  // Unexpected empties fall through here and fail this check.
  boolean isOk = verifyResultAgainstDataGenerator(result, verify, false);
  long numErrorsAfterThis = 0;
  if (isOk) {
    long cols = 0;
    // Count the columns for reporting purposes.
    for (byte[] cf : result.getMap().keySet()) {
      cols += result.getFamilyMap(cf).size();
    }
    numCols.addAndGet(cols);
  } else {
    if (writer != null) {
      LOG.error("At the time of failure, writer wrote " + writer.numKeys.get() + " keys");
    }
    numErrorsAfterThis = numReadErrors.incrementAndGet();
  }

  if (numErrorsAfterThis > maxErrors) {
    LOG.error("Aborting readers -- found more than " + maxErrors + " errors");
    aborted = true;
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:39,代码来源:MultiThreadedReader.java

示例11: assertGet

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Asserts the row for {@code value} is present with a matching value, or --
 * when {@code expect} is false -- that the get returned an empty result.
 */
private void assertGet(Region region, int value, boolean expect) throws IOException {
  byte[] row = Bytes.toBytes(String.valueOf(value));
  Get get = new Get(row);
  Result result = region.get(get);
  if (expect) {
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } else {
    // The previous bare result.isEmpty() discarded its return value, so
    // absence was never actually checked; assert it explicitly.
    Assert.assertTrue(result.isEmpty());
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:11,代码来源:TestRegionReplicas.java

示例12: assertGetRpc

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Same check as assertGet, but issued through the raw RPC layer: asserts the
 * row for {@code value} is present with a matching value, or that the
 * response result is empty when {@code expect} is false.
 */
private void assertGetRpc(HRegionInfo info, int value, boolean expect)
    throws IOException, ServiceException {
  byte[] row = Bytes.toBytes(String.valueOf(value));
  Get get = new Get(row);
  ClientProtos.GetRequest getReq = RequestConverter.buildGetRequest(info.getRegionName(), get);
  ClientProtos.GetResponse getResp =  getRS().getRSRpcServices().get(null, getReq);
  Result result = ProtobufUtil.toResult(getResp.getResult());
  if (expect) {
    Assert.assertArrayEquals(row, result.getValue(f, null));
  } else {
    // The previous bare result.isEmpty() discarded its return value, so
    // absence was never actually checked; assert it explicitly.
    Assert.assertTrue(result.isEmpty());
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:14,代码来源:TestRegionReplicas.java

示例13: getQuotas

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Reads one quota cell ({@code QUOTA_FAMILY_INFO:qualifier}) from the quota
 * table row {@code rowKey}; returns null when the row has no such cell.
 */
private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
    final byte[] qualifier) throws IOException {
  Get get = new Get(rowKey);
  get.addColumn(QUOTA_FAMILY_INFO, qualifier);
  Result result = doGet(connection, get);
  return result.isEmpty()
      ? null
      : quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:11,代码来源:QuotaTableUtil.java

示例14: RowResultGenerator

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Builds a generator over the cells of a single row fetched via a REST Get.
 * Column specifiers from {@code rowspec} are applied as family or
 * family:qualifier selections; the time range, max versions, optional filter
 * and block-cache setting are applied before the Get is issued. If the row
 * exists, the cell iterator ({@code valuesI}) is populated; otherwise it is
 * left unset. Certain retrieval failures are deliberately logged and
 * swallowed (see the warning comment below) rather than propagated.
 *
 * @throws IllegalArgumentException if a column specifier is malformed
 * @throws IOException if the table lookup or Get fails
 */
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        // A specifier is either "family" or "family:qualifier".
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case if multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage but
    // help to avoid confusion by leaving a record of what happened here in
    // the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:41,代码来源:RowResultGenerator.java

示例15: getDataFromHbaseRest

import org.apache.hadoop.hbase.client.Result; //导入方法依赖的package包/类
/**
 * Demonstrates reading from HBase through the REST gateway: a single-row Get
 * against "mywebproject:myclickstream" followed by a two-column Scan.
 *
 * Fixes over the previous version: removed the unused ResultScanner local;
 * removed the no-op {@code if(!result1.isEmpty());} (an empty statement that
 * also threw NPE when the Get had failed and result1 was still null); and
 * guarded {@code rsScanner.close()} against the scanner never having been
 * opened.
 */
public static void getDataFromHbaseRest() {
  Cluster hbaseCluster = new Cluster();
  hbaseCluster.add("172.28.182.45", 8080); // REST gateway host and port
  // Create a REST client instance and obtain the remote table handle.
  Client restClient = new Client(hbaseCluster);
  table = new RemoteHTable(restClient, "mywebproject:myclickstream");
  Get get = new Get(Bytes.toBytes("row02"));
  try {
    Result result1 = table.get(get);
    // family "web", qualifiers col01/col02
    byte[] valueWeb = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    byte[] valueWeb01 = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    // family "websitedata", qualifier col01
    byte[] valueWebData = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col01"));

    String valueStr = Bytes.toString(valueWeb);
    String valueStr1 = Bytes.toString(valueWeb01);
    String valueWebdataStr = Bytes.toString(valueWebData);

    System.out.println("GET: \n" + " web: " + valueStr + "\n web: " + valueStr1+"\n "+"Webdata: "+valueWebdataStr);
  } catch (IOException e1) {
    e1.printStackTrace();
  }

  ResultScanner rsScanner = null;
  try {
    Scan s = new Scan();
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    rsScanner = table.getScanner(s);

    for (Result rr = rsScanner.next(); rr != null; rr = rsScanner.next()) {
      System.out.println("Found row : " + rr);
    }
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    // Close the scanner only if it was actually opened; the old code NPE'd
    // here whenever getScanner itself threw.
    if (rsScanner != null) {
      rsScanner.close();
    }
  }
}
 
开发者ID:PacktPublishing,项目名称:HBase-High-Performance-Cookbook,代码行数:56,代码来源:package Col;.java


注:本文中的org.apache.hadoop.hbase.client.Result.isEmpty方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。