

Java Get.setMaxVersions Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Get.setMaxVersions. If you are unsure what Get.setMaxVersions does, how to call it, or want to see it used in real code, the hand-picked examples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.client.Get, the class that declares this method.


The sections below present 12 code examples of Get.setMaxVersions, ordered by popularity.
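Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how setMaxVersions is typically combined with a Get on the HBase 1.x client API. The table name, column family, qualifier, and row key ("my_table", "cf", "q", "row-1") are placeholder assumptions for illustration only.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetMaxVersionsSketch {
  public static void main(String[] args) throws Exception {
    // "my_table", "cf", "q" and "row-1" are placeholder names for this sketch.
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Get get = new Get(Bytes.toBytes("row-1"));
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      get.setMaxVersions(3);   // return up to 3 versions of each matching cell
      // get.setMaxVersions(); // no-arg form: return all stored versions
      Result result = table.get(get);
      for (Cell cell : result.rawCells()) {
        System.out.println(cell.getTimestamp() + " -> "
            + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}

Without setMaxVersions, a Get returns only the newest version of each cell; combining it with setTimeRange, as several examples below do, further limits which of those versions are visible.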

Example 1: getVer

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Note: this public interface is slightly different from public Java APIs in regard to
 * handling of the qualifier. Here we differ from the public Java API in that null != byte[0].
 * Rather, we respect qual == null as a request for the entire column family. If you want to
 * access the entire column family, use
 * {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a {@code column} value
 * that lacks a {@code ':'}.
 */
public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: ThriftServerRunner.java

Example 2: getVerTs

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Note: this internal interface is slightly different from public APIs in regard to handling
 * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
 * we respect qual == null as a request for the entire column family. The caller (
 * {@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS
 * consistent in that the column is parsed as normal.
 */
protected List<TCell> getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, long timestamp, int numVersions, Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setTimeRange(0, timestamp);
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: ThriftServerRunner.java

Example 3: Get

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Fetch a single record (one cell value) from a single table.
 * 
 * @throws IOException
 */
public static byte[] Get(String key, String TableName, String ColumnFamily, String ColumnName) throws IOException {
	Get get_cell = new Get(Bytes.toBytes(key));
	get_cell.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	get_cell.setMaxVersions(1);
	get_cell.setCacheBlocks(false);
	Result result = hbase_table.get(get_cell);
	return result.value();
}
 
Developer: ItGql, Project: SparkIsax, Lines: 14, Source: HBaseUtils.java

Example 4: checkGet

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
private void checkGet(Region region, byte[] row, byte[] fam, byte[] col,
    long time, byte[]... vals) throws IOException {
  Get g = new Get(row);
  g.addColumn(fam, col);
  g.setMaxVersions();
  g.setTimeRange(0L, time);
  Result r = region.get(g);
  checkResult(r, fam, col, vals);

}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: TestKeepDeletes.java

Example 5: testGet

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test public void testGet() throws Exception{
  byte[] row = "row".getBytes();
  byte[] fam = "fam".getBytes();
  byte[] qf1 = "qf1".getBytes();

  long ts = System.currentTimeMillis();
  int maxVersions = 2;

  Get get = new Get(row);
  get.addColumn(fam, qf1);
  get.setTimeRange(ts, ts+1);
  get.setMaxVersions(maxVersions);

  ClientProtos.Get getProto = ProtobufUtil.toGet(get);
  Get desGet = ProtobufUtil.toGet(getProto);

  assertTrue(Bytes.equals(get.getRow(), desGet.getRow()));
  Set<byte[]> set = null;
  Set<byte[]> desSet = null;

  for(Map.Entry<byte[], NavigableSet<byte[]>> entry :
      get.getFamilyMap().entrySet()){
    assertTrue(desGet.getFamilyMap().containsKey(entry.getKey()));
    set = entry.getValue();
    desSet = desGet.getFamilyMap().get(entry.getKey());
    for(byte [] qualifier : set){
      assertTrue(desSet.contains(qualifier));
    }
  }

  assertEquals(get.getMaxVersions(), desGet.getMaxVersions());
  TimeRange tr = get.getTimeRange();
  TimeRange desTr = desGet.getTimeRange();
  assertEquals(tr.getMax(), desTr.getMax());
  assertEquals(tr.getMin(), desTr.getMin());
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: TestSerialization.java

Example 6: RowResultGenerator

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case where multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // this helps avoid confusion by leaving a record of what happened here
    // in the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: RowResultGenerator.java

Example 7: testMultiGet

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testMultiGet() throws Exception {
  ArrayList<Get> gets = new ArrayList<Get>();
  gets.add(new Get(ROW_1));
  gets.add(new Get(ROW_2));
  Result[] results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
  assertEquals(1, results[0].size());
  assertEquals(2, results[1].size());

  //Test Versions
  gets = new ArrayList<Get>();
  Get g = new Get(ROW_1);
  g.setMaxVersions(3);
  gets.add(g);
  gets.add(new Get(ROW_2));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
  assertEquals(1, results[0].size());
  assertEquals(3, results[1].size());

  //404
  gets = new ArrayList<Get>();
  gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(0, results.length);

  gets = new ArrayList<Get>();
  gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
  gets.add(new Get(ROW_1));
  gets.add(new Get(ROW_2));
  results = remoteTable.get(gets);
  assertNotNull(results);
  assertEquals(2, results.length);
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestRemoteTable.java

Example 8: getFromThrift

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift).
 *
 * This ignores any timestamps set on {@link TColumn} objects.
 *
 * @param in the <code>TGet</code> to convert
 *
 * @return <code>Get</code> object
 *
 * @throws IOException if an invalid time range or max version parameter is given
 */
public static Get getFromThrift(TGet in) throws IOException {
  Get out = new Get(in.getRow());

  // Timestamp overwrites time range if both are set
  if (in.isSetTimestamp()) {
    out.setTimeStamp(in.getTimestamp());
  } else if (in.isSetTimeRange()) {
    out.setTimeRange(in.getTimeRange().getMinStamp(), in.getTimeRange().getMaxStamp());
  }

  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out,in.getAttributes());
  }

  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }
  
  if (!in.isSetColumns()) {
    return out;
  }

  for (TColumn column : in.getColumns()) {
    if (column.isSetQualifier()) {
      out.addColumn(column.getFamily(), column.getQualifier());
    } else {
      out.addFamily(column.getFamily());
    }
  }

  return out;
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: ThriftUtilities.java

Example 9: testWithoutKeepingDeletes

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long ts = EnvironmentEdgeManager.currentTime();
  Put p = new Put(T1, ts);
  p.add(c0, c0, T1);
  region.put(p);

  Get gOne = new Get(T1);
  gOne.setMaxVersions();
  gOne.setTimeRange(0L, ts + 1);
  Result rOne = region.get(gOne);
  assertFalse(rOne.isEmpty());


  Delete d = new Delete(T1, ts+2);
  d.deleteColumn(c0, c0, ts);
  region.delete(d);

  // "past" get does not see rows behind delete marker
  Get g = new Get(T1);
  g.setMaxVersions();
  g.setTimeRange(0L, ts+1);
  Result r = region.get(g);
  assertTrue(r.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan s = new Scan();
  s.setMaxVersions();
  s.setTimeRange(0L, ts+1);
  InternalScanner scanner = region.getScanner(s);
  List<Cell> kvs = new ArrayList<Cell>();
  while (scanner.next(kvs))
    ;
  assertTrue(kvs.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));
  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestKeepDeletes.java

Example 10: testAllColumnsWithBloomFilter

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testAllColumnsWithBloomFilter() throws IOException {
  byte[] TABLE = Bytes.toBytes("testAllColumnsWithBloomFilter");
  byte[] FAMILY = Bytes.toBytes("family");

  // Create table
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY).setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = TEST_UTIL.createLocalHRegion(info, htd);
  try {
    // For row:0, col:0: insert versions 1 through 4.
    byte row[] = Bytes.toBytes("row:" + 0);
    byte column[] = Bytes.toBytes("column:" + 0);
    Put put = new Put(row);
    put.setDurability(Durability.SKIP_WAL);
    for (long idx = 1; idx <= 4; idx++) {
      put.add(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx));
    }
    region.put(put);

    // Flush
    region.flush(true);

    // Get rows
    Get get = new Get(row);
    get.setMaxVersions();
    Cell[] kvs = region.get(get).rawCells();

    // Check if rows are correct
    assertEquals(4, kvs.length);
    checkOneCell(kvs[0], FAMILY, 0, 0, 4);
    checkOneCell(kvs[1], FAMILY, 0, 0, 3);
    checkOneCell(kvs[2], FAMILY, 0, 0, 2);
    checkOneCell(kvs[3], FAMILY, 0, 0, 1);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 43, Source: TestHRegion.java

Example 11: testIncrementColumnValue_ICVDuringFlush

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testIncrementColumnValue_ICVDuringFlush()
    throws IOException, InterruptedException {
  init(this.name.getMethodName());

  long oldValue = 1L;
  long newValue = 3L;
  this.store.add(new KeyValue(row, family, qf1,
      System.currentTimeMillis(),
      Bytes.toBytes(oldValue)));

  // snapshot the store.
  this.store.snapshot();

  // add other things:
  this.store.add(new KeyValue(row, family, qf2,
      System.currentTimeMillis(),
      Bytes.toBytes(oldValue)));

  // update during the snapshot.
  long ret = this.store.updateColumnValue(row, family, qf1, newValue);

  // memstore should have grown by some amount.
  Assert.assertTrue(ret > 0);

  // then flush.
  flushStore(store, id++);
  Assert.assertEquals(1, this.store.getStorefiles().size());
  // from the one we inserted up there, and a new one
  Assert.assertEquals(2, ((DefaultMemStore)this.store.memstore).cellSet.size());

  // how many key/values for this row are there?
  Get get = new Get(row);
  get.addColumn(family, qf1);
  get.setMaxVersions(); // all versions.
  List<Cell> results = new ArrayList<Cell>();

  results = HBaseTestingUtility.getFromStoreFile(store, get);
  Assert.assertEquals(2, results.size());

  long ts1 = results.get(0).getTimestamp();
  long ts2 = results.get(1).getTimestamp();

  Assert.assertTrue(ts1 > ts2);

  Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
  Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
}
 
Developer: fengchen8086, Project: ditb, Lines: 49, Source: TestStore.java

Example 12: testIncrementColumnValue_SnapshotFlushCombo

import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testIncrementColumnValue_SnapshotFlushCombo() throws Exception {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  init(this.name.getMethodName());

  long oldValue = 1L;
  long newValue = 3L;
  this.store.add(new KeyValue(row, family, qf1,
      EnvironmentEdgeManager.currentTime(),
      Bytes.toBytes(oldValue)));

  // snapshot the store.
  this.store.snapshot();

  // update during the snapshot, the exact same TS as the Put (lololol)
  long ret = this.store.updateColumnValue(row, family, qf1, newValue);

  // memstore should have grown by some amount.
  Assert.assertTrue(ret > 0);

  // then flush.
  flushStore(store, id++);
  Assert.assertEquals(1, this.store.getStorefiles().size());
  Assert.assertEquals(1, ((DefaultMemStore)this.store.memstore).cellSet.size());

  // now increment again:
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);

  // at this point we have a TS=1 in snapshot, and a TS=2 in kvset, so increment again:
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);

  // the second TS should be TS=2 or higher, even though 'time=1' right now.


  // how many key/values for this row are there?
  Get get = new Get(row);
  get.addColumn(family, qf1);
  get.setMaxVersions(); // all versions.
  List<Cell> results = new ArrayList<Cell>();

  results = HBaseTestingUtility.getFromStoreFile(store, get);
  Assert.assertEquals(2, results.size());

  long ts1 = results.get(0).getTimestamp();
  long ts2 = results.get(1).getTimestamp();

  Assert.assertTrue(ts1 > ts2);
  Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
  Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));

  mee.setValue(2); // time goes up slightly
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);

  results = HBaseTestingUtility.getFromStoreFile(store, get);
  Assert.assertEquals(2, results.size());

  ts1 = results.get(0).getTimestamp();
  ts2 = results.get(1).getTimestamp();

  Assert.assertTrue(ts1 > ts2);
  Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
  Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
}
 
Developer: fengchen8086, Project: ditb, Lines: 68, Source: TestStore.java


Note: The org.apache.hadoop.hbase.client.Get.setMaxVersions method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please refer to each project's License; do not reproduce without permission.