This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Get.addColumn. If you are wondering what Get.addColumn does, how to use it, or want concrete samples, the curated method examples below may help. You can also read further about the enclosing class org.apache.hadoop.hbase.client.Get.
The following shows 15 code examples of Get.addColumn, sorted by popularity by default.
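Before the examples, here is a minimal sketch of the basic call pattern; it assumes the post-1.0 client API, and the table name, row key, family, and qualifier are hypothetical placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("my_table"))) {
  Get get = new Get(Bytes.toBytes("row1"));
  // Restrict the Get to a single family:qualifier pair instead of the whole row
  get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  Result result = table.get(get);
  byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
}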
Example 1: setUp
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
public void setUp() throws Exception {
  super.setUp();
  row1 = Bytes.toBytes("row1");
  row2 = Bytes.toBytes("row2");
  row3 = Bytes.toBytes("row3");
  fam1 = Bytes.toBytes("fam1");
  fam2 = Bytes.toBytes("fam2");
  col1 = Bytes.toBytes("col1");
  col2 = Bytes.toBytes("col2");
  col3 = Bytes.toBytes("col3");
  col4 = Bytes.toBytes("col4");
  col5 = Bytes.toBytes("col5");
  data = Bytes.toBytes("data");
  // Create Get
  get = new Get(row1);
  get.addFamily(fam1);
  get.addColumn(fam2, col2);
  get.addColumn(fam2, col4);
  get.addColumn(fam2, col5);
  this.scan = new Scan(get);
  rowComparator = KeyValue.COMPARATOR;
}
Example 2: verifyHBaseCell
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
protected void verifyHBaseCell(String tableName, String rowKey,
    String colFamily, String colName, String val) throws IOException {
  Get get = new Get(Bytes.toBytes(rowKey));
  get.addColumn(Bytes.toBytes(colFamily), Bytes.toBytes(colName));
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    Result r = table.get(get);
    byte[] actualVal = r.getValue(Bytes.toBytes(colFamily),
        Bytes.toBytes(colName));
    if (null == val) {
      assertNull("Got a result when expected null", actualVal);
    } else {
      assertNotNull("No result, but we expected one", actualVal);
      assertEquals(val, Bytes.toString(actualVal));
    }
  } finally {
    table.close();
  }
}
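Note that this example uses the pre-1.0 HTable constructor, which was later deprecated; in current client code the equivalent handle is obtained through Connection.getTable(TableName), as in the sketch at the top of this article.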
Example 3: verifyData
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
static void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Get get = new Get(row);
    for (byte[] family : families) {
      get.addColumn(family, qf);
    }
    Result result = newReg.get(get);
    Cell[] raw = result.rawCells();
    assertEquals(families.length, result.size());
    for (int j = 0; j < families.length; j++) {
      assertTrue(CellUtil.matchingRow(raw[j], row));
      assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
      assertTrue(CellUtil.matchingQualifier(raw[j], qf));
    }
  }
}
Example 4: getVer
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Note: this public interface is slightly different from public Java APIs in regard to
 * handling of the qualifier. Here we differ from the public Java API in that null != byte[0].
 * Rather, we respect qual == null as a request for the entire column family. If you want to
 * access the entire column family, use
 * {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a {@code column} value
 * that lacks a {@code ':'}.
 */
public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
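The null-qualifier convention described above mirrors the distinction in the plain client API; a minimal sketch with hypothetical family and qualifier names:

// Request the entire column family (what qual == null maps to above):
Get wholeFamily = new Get(Bytes.toBytes("row1"));
wholeFamily.addFamily(Bytes.toBytes("cf"));

// Request a single column only:
Get singleColumn = new Get(Bytes.toBytes("row1"));
singleColumn.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));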
Example 5: getVerTs
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Note: this internal interface is slightly different from public APIs in regard to handling
 * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
 * we respect qual == null as a request for the entire column family. The caller (
 * {@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS
 * consistent in that the column is parsed as normal.
 */
protected List<TCell> getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, long timestamp, int numVersions, Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setTimeRange(0, timestamp);
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
Example 6: doGet
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Run a Get against the passed-in <code>store</code> on the passed <code>row</code>, etc.
 *
 * @param store
 * @param row
 * @param family
 * @param tr
 * @return Get result.
 * @throws IOException
 */
private List<Cell> doGet(final Store store, final byte[] row,
    final Map.Entry<byte[], List<Cell>> family, final TimeRange tr) throws IOException {
  // Sort the cells so that they match the order that they
  // appear in the Get results. Otherwise, we won't be able to
  // find the existing values if the cells are not specified
  // in order by the client since cells are in an array list.
  Collections.sort(family.getValue(), store.getComparator());
  // Get previous values for all columns in this family
  Get get = new Get(row);
  for (Cell cell : family.getValue()) {
    get.addColumn(family.getKey(), CellUtil.cloneQualifier(cell));
  }
  if (tr != null) get.setTimeRange(tr.getMin(), tr.getMax());
  return get(get, false);
}
Example 7: Get
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Fetch a single record from a single table.
 *
 * @throws IOException
 */
public static byte[] Get(String key, String TableName, String ColumnFamily, String ColumnName) throws IOException {
  Get get_cell = new Get(Bytes.toBytes(key));
  get_cell.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
  get_cell.setMaxVersions(1);
  get_cell.setCacheBlocks(false);
  Result result = hbase_table.get(get_cell);
  return result.value();
}
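A hypothetical call to this helper (note that the TableName parameter is accepted but never used; the method reads the static hbase_table handle, which must have been initialized elsewhere):

byte[] raw = Get("row1", "my_table", "cf", "qual");
String value = Bytes.toString(raw);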
Example 8: getQuotas
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
    final byte[] qualifier) throws IOException {
  Get get = new Get(rowKey);
  get.addColumn(QUOTA_FAMILY_INFO, qualifier);
  Result result = doGet(connection, get);
  if (result.isEmpty()) {
    return null;
  }
  return quotasFromData(result.getValue(QUOTA_FAMILY_INFO, qualifier));
}
Example 9: getIncrementCurrentValue
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Do a specific Get on the passed <code>columnFamily</code> and column qualifiers from
 * <code>increments</code> only.
 *
 * @param increment
 * @param columnFamily
 * @param increments
 * @return Return the Cells to Increment
 * @throws IOException
 */
private List<Cell> getIncrementCurrentValue(final Increment increment, byte[] columnFamily,
    final List<Cell> increments, final IsolationLevel isolation) throws IOException {
  Get get = new Get(increment.getRow());
  if (isolation != null) get.setIsolationLevel(isolation);
  for (Cell cell : increments) {
    get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
  }
  TimeRange tr = increment.getTimeRange();
  if (tr != null) {
    get.setTimeRange(tr.getMin(), tr.getMax());
  }
  return get(get, false);
}
Example 10: RowResultGenerator
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col : rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case where multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // logging what happened here helps avoid confusion.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
Example 11: testGet
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
@Test
public void testGet() throws Exception {
  byte[] row = "row".getBytes();
  byte[] fam = "fam".getBytes();
  byte[] qf1 = "qf1".getBytes();
  long ts = System.currentTimeMillis();
  int maxVersions = 2;
  Get get = new Get(row);
  get.addColumn(fam, qf1);
  get.setTimeRange(ts, ts + 1);
  get.setMaxVersions(maxVersions);
  ClientProtos.Get getProto = ProtobufUtil.toGet(get);
  Get desGet = ProtobufUtil.toGet(getProto);
  assertTrue(Bytes.equals(get.getRow(), desGet.getRow()));
  Set<byte[]> set = null;
  Set<byte[]> desSet = null;
  for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
      get.getFamilyMap().entrySet()) {
    assertTrue(desGet.getFamilyMap().containsKey(entry.getKey()));
    set = entry.getValue();
    desSet = desGet.getFamilyMap().get(entry.getKey());
    for (byte[] qualifier : set) {
      assertTrue(desSet.contains(qualifier));
    }
  }
  assertEquals(get.getMaxVersions(), desGet.getMaxVersions());
  TimeRange tr = get.getTimeRange();
  TimeRange desTr = desGet.getTimeRange();
  assertEquals(tr.getMax(), desTr.getMax());
  assertEquals(tr.getMin(), desTr.getMin());
}
Example 12: testDeleteRowWithBloomFilter
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
/**
 * Testcase to cover the bug fix for HBASE-2823: ensures a correct delete when
 * issuing a row delete on columns with the bloom filter set to row+col
 * (BloomType.ROWCOL).
 */
@Test
public void testDeleteRowWithBloomFilter() throws IOException {
  byte[] familyName = Bytes.toBytes("familyName");
  // Create Table
  HColumnDescriptor hcd = new HColumnDescriptor(familyName).setMaxVersions(Integer.MAX_VALUE)
      .setBloomFilterType(BloomType.ROWCOL);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = TEST_UTIL.createLocalHRegion(info, htd);
  try {
    // Insert some data
    byte[] row = Bytes.toBytes("row1");
    byte[] col = Bytes.toBytes("col1");
    Put put = new Put(row);
    put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
    region.put(put);
    region.flush(true);
    Delete del = new Delete(row);
    region.delete(del);
    region.flush(true);
    // Get remaining rows (should have none)
    Get get = new Get(row);
    get.addColumn(familyName, col);
    Cell[] keyValues = region.get(get).rawCells();
    assertTrue(keyValues.length == 0);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 13: checkGet
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
private void checkGet(Region region, byte[] row, byte[] fam, byte[] col,
    long time, byte[]... vals) throws IOException {
  Get g = new Get(row);
  g.addColumn(fam, col);
  g.setMaxVersions();
  g.setTimeRange(0L, time);
  Result r = region.get(g);
  checkResult(r, fam, col, vals);
}
Example 14: verifyInvocationResults
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
public void verifyInvocationResults(Integer[] selectQualifiers,
    Integer[] expectedQualifiers) throws Exception {
  Get get = new Get(ROW_BYTES);
  for (int i = 0; i < selectQualifiers.length; i++) {
    get.addColumn(FAMILY_NAME_BYTES,
        Bytes.toBytes(QUALIFIER_PREFIX + selectQualifiers[i]));
  }
  get.setFilter(new InvocationRecordFilter());
  List<KeyValue> expectedValues = new ArrayList<KeyValue>();
  for (int i = 0; i < expectedQualifiers.length; i++) {
    expectedValues.add(new KeyValue(ROW_BYTES, FAMILY_NAME_BYTES,
        Bytes.toBytes(QUALIFIER_PREFIX + expectedQualifiers[i]),
        expectedQualifiers[i],
        Bytes.toBytes(VALUE_PREFIX + expectedQualifiers[i])));
  }
  Scan scan = new Scan(get);
  List<Cell> actualValues = new ArrayList<Cell>();
  List<Cell> temp = new ArrayList<Cell>();
  InternalScanner scanner = this.region.getScanner(scan);
  while (scanner.next(temp)) {
    actualValues.addAll(temp);
    temp.clear();
  }
  actualValues.addAll(temp);
  Assert.assertTrue("Actual values " + actualValues
      + " differ from the expected values: " + expectedValues,
      expectedValues.equals(actualValues));
}
Example 15: getValue
import org.apache.hadoop.hbase.client.Get; // import the package/class the method depends on
private Optional<byte[]> getValue(final byte[] key) {
  final Get get = new Get(key);
  get.addColumn(ID_FAMILY, ID_COL_QUALIFIER);
  final Result result = doGet(get);
  return Optional.ofNullable(result.getValue(ID_FAMILY, ID_COL_QUALIFIER));
}