

Java Get Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Get. If you are wondering what the Get class is used for, how to use it, or where to find working examples, the curated code samples below should help.


The Get class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the Get class are shown below, sorted by popularity by default.
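Before the examples, here is a minimal, self-contained sketch of a single-row read with Get. The table name "test_table", column family "cf", and qualifier "q" are illustrative assumptions, not taken from the examples below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicGetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("test_table"))) { // hypothetical table name
      Get get = new Get(Bytes.toBytes("row1"));                // row key to read
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // optionally restrict to one column
      Result result = table.get(get);
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      System.out.println(value == null ? "(no value)" : Bytes.toString(value));
    }
  }
}

The examples that follow all build on this same pattern: construct a Get for a row key, optionally narrow it with addFamily/addColumn or replica and consistency settings, and pass it to Table.get (or Region.get in server-side test code).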

Example 1: deleteAndWait

import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void deleteAndWait(byte[] row, Table source, Table... targets)
throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i==NB_RETRIES-1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: TestMultiSlaveReplication.java

Example 2: buildGetRowOrBeforeRequest

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Create a new protocol buffer GetRequest to get a row, all columns in a family.
 * If there is no such row, return the closest row before it.
 *
 * @param regionName the name of the region to get
 * @param row the row to get
 * @param family the column family to get
 * @return a protocol buffer GetRequest
 */
public static GetRequest buildGetRowOrBeforeRequest(
    final byte[] regionName, final byte[] row, final byte[] family) {
  GetRequest.Builder builder = GetRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
    RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);

  Column.Builder columnBuilder = Column.newBuilder();
  columnBuilder.setFamily(ByteStringer.wrap(family));
  ClientProtos.Get.Builder getBuilder =
    ClientProtos.Get.newBuilder();
  getBuilder.setRow(ByteStringer.wrap(row));
  getBuilder.addColumn(columnBuilder.build());
  getBuilder.setClosestRowBefore(true);
  builder.setGet(getBuilder.build());
  return builder.build();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source: RequestConverter.java

Example 3: get

import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Override
public <T> List<T> get(TableName tableName, final List<Get> getList, final RowMapper<T>
        mapper) {
    assertAccessAvailable();
    return execute(tableName, new TableCallback<List<T>>() {
        @Override
        public List<T> doInTable(Table table) throws Throwable {
            Result[] result = table.get(getList);
            List<T> list = new ArrayList<>(result.length);
            for (int i = 0; i < result.length; i++) {
                T t = mapper.mapRow(result[i], i);
                list.add(t);
            }
            return list;
        }
    });
}
 
Developer ID: fchenxi, Project: easyhbase, Lines of code: 18, Source: HbaseTemplate2.java

Example 4: verifyNumericRows

import org.apache.hadoop.hbase.client.Get; // import the required package/class
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Result result = region.get(new Get(data));

    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) continue;

    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
      Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
        cell.getValueLength()));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: HBaseTestingUtility.java

Example 5: createGet

import org.apache.hadoop.hbase.client.Get; // import the required package/class
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source: MultiThreadedReader.java

Example 6: next

import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Override public Result next() throws IOException {
  if (rawTable == null) return null;
  if (localCache.isEmpty()) {
    // load cache by batch get
    int size = Math.min(rowkeyQueue.size(), LOCAL_CACHE_SIZE);
    List<Get> gets = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      gets.add(new Get(rowkeyQueue.poll()));
    }
    Result[] results = rawTable.get(gets);
    for (Result res : results) {
      localCache.add(res);
    }
  }
  if (localCache.isEmpty()) {
    // still empty, no more result, set rawTable to null
    rawTable.close();
    rawTable = null;
    return null;
  }
  return localCache.poll();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: GSScannerCaching.java

Example 7: getRegionLocation

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Returns the HRegionLocation from meta for the given region
 * @param connection connection we're using
 * @param regionName region we're looking for
 * @return HRegionLocation for the given region
 * @throws IOException
 */
public static HRegionLocation getRegionLocation(Connection connection,
                                                byte[] regionName) throws IOException {
  byte[] row = regionName;
  HRegionInfo parsedInfo = null;
  try {
    parsedInfo = parseRegionInfoFromRegionName(regionName);
    row = getMetaKeyForRegion(parsedInfo);
  } catch (Exception parseEx) {
    // Ignore. This is used with tableName passed as regionName.
  }
  Get get = new Get(row);
  get.addFamily(HConstants.CATALOG_FAMILY);
  Result r = get(getMetaHTable(connection), get);
  RegionLocations locations = getRegionLocations(r);
  return locations == null
    ? null
    : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: MetaTableAccessor.java

Example 8: testReplayingFlushRequestRestoresReadsEnabledState

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Test the case where the secondary region replica is not in reads enabled state because it is
 * waiting for a flush or region open marker from primary region. Replaying CANNOT_FLUSH
 * flush marker entry should restore the reads enabled status in the region and allow the reads
 * to continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);

  // Test case 1: Test that replaying CANNOT_FLUSH request marker assuming this came from
  // triggered flush restores readsEnabled
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: TestHRegionReplayEvents.java

Example 9: wait

import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void wait(byte[] row, Table target, boolean isDeleted)
    throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for replication. Row:" + Bytes.toString(row)
          + ". IsDeleteReplication:" + isDeleted);
    }
    Result res = target.get(get);
    boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
    if (sleep) {
      LOG.info("Waiting for more time for replication. Row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      Thread.sleep(SLEEP_TIME);
    } else {
      if (!isDeleted) {
        assertArrayEquals(res.value(), row);
      }
      LOG.info("Obtained row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      break;
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: TestMasterReplication.java

Example 10: checkWithWait

import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void checkWithWait(byte[] row, int count, Table table) throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time while getting the row.");
    }
    boolean rowReplicated = false;
    Result res = table.get(get);
    if (res.size() >= 1) {
      LOG.info("Row is replicated");
      rowReplicated = true;
      assertEquals("Table '" + table + "' did not have the expected number of  results.",
          count, res.size());
      break;
    }
    if (rowReplicated) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestMultiSlaveReplication.java

Example 11: testNamespaceUserGrant

import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Test (timeout=180000)
public void testNamespaceUserGrant() throws Exception {
  AccessTestAction getAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      try(Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE);) {
        return t.get(new Get(TEST_ROW));
      }
    }
  };

  String namespace = TEST_TABLE.getNamespaceAsString();

  // Grant namespace READ to USER_NONE, this should supersede any table permissions
  grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);
  // Now USER_NONE should be able to read
  verifyAllowed(getAction, USER_NONE);

  // Revoke namespace READ from USER_NONE
  revokeFromNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);
  verifyDenied(getAction, USER_NONE);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source: TestAccessController.java

Example 12: blockUntilRegionIsOpened

import org.apache.hadoop.hbase.client.Get; // import the required package/class
public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri)
    throws IOException, InterruptedException {
  log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
  long start = System.currentTimeMillis();
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(hri.getTable())) {
    byte[] row = hri.getStartKey();
    // Check for null/empty row. If we find one, use a key that is likely to be in first region.
    if (row == null || row.length <= 0) row = new byte[] { '0' };
    Get get = new Get(row);
    while (System.currentTimeMillis() - start < timeout) {
      try {
        table.get(get);
        break;
      } catch (IOException ex) {
        // wait some more
      }
      Threads.sleep(10);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: TestEndToEndSplitTransaction.java

Example 13: doGet

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Gets a Table for this table, does the get and closes the Table
 */
public Result doGet(final Get get) {
    Result result;
    final Table tableInterface = getTable();
    try {
        result = doGet(tableInterface, get);
    } finally {
        closeTable(tableInterface);
    }
    return result;
}
 
Developer ID: gchq, Project: stroom-stats, Lines of code: 14, Source: HBaseTable.java

Example 14: testMergeTool

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Test merge tool.
 * @throws Exception
 */
public void testMergeTool() throws Exception {
  // First verify we can read the rows from the source regions and that they
  // contain the right data.
  for (int i = 0; i < regions.length; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = regions[i].get(get);
      byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
    // Close the region and delete the log
    HRegion.closeHRegion(regions[i]);
  }
  WAL log = wals.getWAL(new byte[]{});
  // Merge Region 0 and Region 1
  HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
    this.sourceRegions[0].getRegionNameAsString(),
    this.sourceRegions[1].getRegionNameAsString(), log, 2);

  // Merge the result of merging regions 0 and 1 with region 2
  merged = mergeAndVerify("merging regions 0+1 and 2",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[2].getRegionNameAsString(), log, 3);

  // Merge the result of merging regions 0, 1 and 2 with region 3
  merged = mergeAndVerify("merging regions 0+1+2 and 3",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[3].getRegionNameAsString(), log, 4);

  // Merge the result of merging regions 0, 1, 2 and 3 with region 4
  merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: TestMergeTool.java

Example 15: testScanAcrossManySmallColumns

import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Usecase:
 *
 *  - create a row with 1M cells, 10 bytes in each
 *  - flush & run major compaction
 *  - try to Get whole row.
 *
 *  OOME happened in StoreScanner.next(..).
 *
 * @throws IOException
 */
@Test(expected = RowTooBigException.class)
public void testScanAcrossManySmallColumns() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");

  HTableDescriptor htd = TEST_HTD;
  HColumnDescriptor hcd = new HColumnDescriptor(fam1);
  if (htd.hasFamily(hcd.getName())) {
    htd.modifyFamily(hcd);
  } else {
    htd.addFamily(hcd);
  }

  final HRegionInfo hri =
    new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
      HConstants.EMPTY_END_ROW);
  Region region = HTU.createHRegion(hri, rootRegionDir, HTU.getConfiguration(), htd);
  try {
    // Add to memstore
    for (int i = 0; i < 10; i++) {
      Put put = new Put(row1);
      for (int j = 0; j < 10 * 10000; j++) {
        put.add(fam1, Bytes.toBytes("col_" + i + "_" + j), new byte[10]);
      }
      region.put(put);
      region.flush(true);
    }
    region.compact(true);

    Get get = new Get(row1);
    region.get(get);
  } finally {
    HBaseTestingUtility.closeRegion(region);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 47, Source: TestRowTooBig.java


Note: The org.apache.hadoop.hbase.client.Get class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please consult each project's license. Do not republish without permission.