This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Get.setTimeRange. If you are wondering what Get.setTimeRange does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.client.Get.
The following presents 9 code examples of Get.setTimeRange, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
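Before the examples, a quick orientation: Get.setTimeRange(minStamp, maxStamp) restricts a Get to cell versions whose timestamps fall in the half-open interval [minStamp, maxStamp) — the minimum is inclusive, the maximum exclusive. Here is a minimal, self-contained sketch; the table name "demo" and the cf:q column are placeholders, not taken from the examples below:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SetTimeRangeDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"))) { // hypothetical table
      long now = System.currentTimeMillis();
      Get get = new Get(Bytes.toBytes("row1"));
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")); // hypothetical family/qualifier
      // Only versions with minStamp <= ts < maxStamp are returned; pass now + 1
      // to include cells written at exactly `now`.
      get.setTimeRange(0L, now + 1);
      get.setMaxVersions(3);
      Result result = table.get(get);
      System.out.println(result);
    }
  }
}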
Example 1: getVerTs
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
/**
 * Note: this internal interface is slightly different from public APIs in regard to handling
 * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
 * we respect qual == null as a request for the entire column family. The caller
 * ({@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS
 * consistent in that the column is parsed as normal.
 */
protected List<TCell> getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, long timestamp, int numVersions, Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setTimeRange(0, timestamp);
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
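A brief note on the convention the Javadoc describes: a null qualifier widens the read to the whole column family, while an empty byte[0] qualifier would address the column whose qualifier is literally empty. A minimal illustration of that branch in isolation (row and family names are placeholders):

Get g = new Get(Bytes.toBytes("row1"));
byte[] fam = Bytes.toBytes("cf");
byte[] qual = null; // as in getVerTs: null requests the entire family
if (qual == null) {
  g.addFamily(fam);
} else {
  g.addColumn(fam, qual); // a byte[0] qualifier would land here instead
}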
Example 2: doGet
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
/**
 * Run a Get against passed in <code>store</code> on passed <code>row</code>, etc.
 *
 * @param store the Store to run the Get against
 * @param row the row to fetch
 * @param family the column family, mapped to the cells whose qualifiers should be fetched
 * @param tr time range to apply to the Get, or null for all time
 * @return Get result.
 * @throws IOException
 */
private List<Cell> doGet(final Store store, final byte[] row,
    final Map.Entry<byte[], List<Cell>> family, final TimeRange tr) throws IOException {
  // Sort the cells so that they match the order that they
  // appear in the Get results. Otherwise, we won't be able to
  // find the existing values if the cells are not specified
  // in order by the client since cells are in an array list.
  Collections.sort(family.getValue(), store.getComparator());
  // Get previous values for all columns in this family
  Get get = new Get(row);
  for (Cell cell : family.getValue()) {
    get.addColumn(family.getKey(), CellUtil.cloneQualifier(cell));
  }
  if (tr != null) get.setTimeRange(tr.getMin(), tr.getMax());
  return get(get, false);
}
Example 3: getRowsWithColumnsTs
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
@Override
public List<TRowResult> getRowsWithColumnsTs(ByteBuffer tableName,
    List<ByteBuffer> rows,
    List<ByteBuffer> columns, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    List<Get> gets = new ArrayList<Get>(rows.size());
    table = getTable(tableName);
    if (metrics != null) {
      metrics.incNumRowKeysInBatchGet(rows.size());
    }
    for (ByteBuffer row : rows) {
      Get get = new Get(getBytes(row));
      addAttributes(get, attributes);
      if (columns != null) {
        for (ByteBuffer column : columns) {
          byte[][] famAndQf = KeyValue.parseColumn(getBytes(column));
          if (famAndQf.length == 1) {
            get.addFamily(famAndQf[0]);
          } else {
            get.addColumn(famAndQf[0], famAndQf[1]);
          }
        }
      }
      get.setTimeRange(0, timestamp);
      gets.add(get);
    }
    Result[] result = table.get(gets);
    return ThriftUtilities.rowResultFromHBase(result);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
Example 4: getIncrementCurrentValue
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
/**
 * Do a specific Get on passed <code>columnFamily</code> and column qualifiers from
 * <code>increments</code> only.
 *
 * @param increment the Increment whose row and time range drive the Get
 * @param columnFamily the column family to read
 * @param increments the Cells whose qualifiers identify the columns to fetch
 * @return Return the Cells to Increment
 * @throws IOException
 */
private List<Cell> getIncrementCurrentValue(final Increment increment, byte[] columnFamily,
    final List<Cell> increments, final IsolationLevel isolation) throws IOException {
  Get get = new Get(increment.getRow());
  if (isolation != null) get.setIsolationLevel(isolation);
  for (Cell cell : increments) {
    get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
  }
  TimeRange tr = increment.getTimeRange();
  if (tr != null) {
    get.setTimeRange(tr.getMin(), tr.getMax());
  }
  return get(get, false);
}
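For context, a hedged client-side sketch of how a time range typically arrives here: the caller sets it on the Increment itself, and getIncrementCurrentValue copies it onto the Get so that only current values inside that range are read. The table handle, row, and column names below are placeholders, and the fragment assumes an enclosing method that throws IOException:

Increment inc = new Increment(Bytes.toBytes("counter-row"));
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
// Restrict which existing versions are read when computing the new value;
// this TimeRange is what increment.getTimeRange() returns above.
inc.setTimeRange(0L, System.currentTimeMillis());
table.increment(inc);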
Example 5: checkGet
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
private void checkGet(Region region, byte[] row, byte[] fam, byte[] col,
    long time, byte[]... vals) throws IOException {
  Get g = new Get(row);
  g.addColumn(fam, col);
  g.setMaxVersions();
  g.setTimeRange(0L, time);
  Result r = region.get(g);
  checkResult(r, fam, col, vals);
}
Example 6: testGet
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
@Test
public void testGet() throws Exception {
  byte[] row = "row".getBytes();
  byte[] fam = "fam".getBytes();
  byte[] qf1 = "qf1".getBytes();
  long ts = System.currentTimeMillis();
  int maxVersions = 2;
  Get get = new Get(row);
  get.addColumn(fam, qf1);
  get.setTimeRange(ts, ts + 1);
  get.setMaxVersions(maxVersions);
  ClientProtos.Get getProto = ProtobufUtil.toGet(get);
  Get desGet = ProtobufUtil.toGet(getProto);
  assertTrue(Bytes.equals(get.getRow(), desGet.getRow()));
  Set<byte[]> set = null;
  Set<byte[]> desSet = null;
  for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
      get.getFamilyMap().entrySet()) {
    assertTrue(desGet.getFamilyMap().containsKey(entry.getKey()));
    set = entry.getValue();
    desSet = desGet.getFamilyMap().get(entry.getKey());
    for (byte[] qualifier : set) {
      assertTrue(desSet.contains(qualifier));
    }
  }
  assertEquals(get.getMaxVersions(), desGet.getMaxVersions());
  TimeRange tr = get.getTimeRange();
  TimeRange desTr = desGet.getTimeRange();
  assertEquals(tr.getMax(), desTr.getMax());
  assertEquals(tr.getMin(), desTr.getMin());
}
Example 7: RowResultGenerator
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col : rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case where multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // this helps avoid confusion by leaving a record of what happened here
    // in the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
Example 8: getFromThrift
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
/**
 * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift).
 *
 * This ignores any timestamps set on {@link TColumn} objects.
 *
 * @param in the <code>TGet</code> to convert
 *
 * @return <code>Get</code> object
 *
 * @throws IOException if an invalid time range or max version parameter is given
 */
public static Get getFromThrift(TGet in) throws IOException {
  Get out = new Get(in.getRow());
  // Timestamp overwrites time range if both are set
  if (in.isSetTimestamp()) {
    out.setTimeStamp(in.getTimestamp());
  } else if (in.isSetTimeRange()) {
    out.setTimeRange(in.getTimeRange().getMinStamp(), in.getTimeRange().getMaxStamp());
  }
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }
  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }
  if (!in.isSetColumns()) {
    return out;
  }
  for (TColumn column : in.getColumns()) {
    if (column.isSetQualifier()) {
      out.addColumn(column.getFamily(), column.getQualifier());
    } else {
      out.addFamily(column.getFamily());
    }
  }
  return out;
}
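A hedged sketch of driving this converter from the Thrift side, assuming the thrift2-generated TGet and TTimeRange classes (the row bytes are placeholders). Per the code above, a plain timestamp wins over a time range when both are set:

TGet tget = new TGet(ByteBuffer.wrap(Bytes.toBytes("row1")));
// Maps to out.setTimeRange(0, now) in getFromThrift -- unless a timestamp
// is also set on the TGet, in which case the timestamp takes precedence.
tget.setTimeRange(new TTimeRange(0L, System.currentTimeMillis()));
Get get = getFromThrift(tget);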
Example 9: testWithoutKeepingDeletes
import org.apache.hadoop.hbase.client.Get; // import the dependent package/class for this method
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long ts = EnvironmentEdgeManager.currentTime();
  Put p = new Put(T1, ts);
  p.add(c0, c0, T1);
  region.put(p);

  Get gOne = new Get(T1);
  gOne.setMaxVersions();
  gOne.setTimeRange(0L, ts + 1);
  Result rOne = region.get(gOne);
  assertFalse(rOne.isEmpty());

  Delete d = new Delete(T1, ts + 2);
  d.deleteColumn(c0, c0, ts);
  region.delete(d);

  // "past" get does not see rows behind delete marker
  Get g = new Get(T1);
  g.setMaxVersions();
  g.setTimeRange(0L, ts + 1);
  Result r = region.get(g);
  assertTrue(r.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan s = new Scan();
  s.setMaxVersions();
  s.setTimeRange(0L, ts + 1);
  InternalScanner scanner = region.getScanner(s);
  List<Cell> kvs = new ArrayList<Cell>();
  while (scanner.next(kvs)) {
    // drain the scanner
  }
  assertTrue(kvs.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));
  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HRegion.closeHRegion(region);
}
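A recurring idiom across examples 5, 6, and 9 is worth spelling out (a general observation, not part of the test above): because the maximum stamp is exclusive, a read meant to include cells written exactly at ts must pass ts + 1 as the upper bound:

g.setTimeRange(0L, ts);     // excludes cells stamped exactly at ts
g.setTimeRange(0L, ts + 1); // includes them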