This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Get.addFamily: what Get.addFamily does, how to call it, and how it appears in real code. The curated examples below should help, and you can also explore other usages of the enclosing class, org.apache.hadoop.hbase.client.Get.
The following shows 15 code examples of Get.addFamily, sorted by popularity by default.
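Before the extracted examples, here is a minimal, self-contained sketch of the pattern they all share: build a Get for one row, widen it to a whole column family with addFamily, and execute it against a Table. The client calls are standard HBase 1.x API, but the table name "t1", family "cf1", and row key "row1" are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetAddFamilySketch {
  // Fetch every column of one family for a single row. addFamily widens the
  // Get to the whole family "cf1"; addColumn(family, qualifier) would narrow
  // it to a single column instead.
  static Result readFamily(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      get.addFamily(Bytes.toBytes("cf1"));
      return table.get(get);
    }
  }
}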
Example 1: getVer
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Note: this public interface is slightly different from public Java APIs in regard to
 * handling of the qualifier. Here we differ from the public Java API in that null != byte[0].
 * Rather, we respect qual == null as a request for the entire column family. If you want to
 * access the entire column family, use
 * {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a {@code column} value
 * that lacks a {@code ':'}.
 */
public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
    byte[] qualifier, int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Get get = new Get(getBytes(row));
    addAttributes(get, attributes);
    if (null == qualifier) {
      get.addFamily(family);
    } else {
      get.addColumn(family, qualifier);
    }
    get.setMaxVersions(numVersions);
    Result result = table.get(get);
    return ThriftUtilities.cellFromHBase(result.rawCells());
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
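A hedged usage sketch of the null-qualifier convention described in the Javadoc above. The handler variable stands for an instance of the class that defines getVer; it, together with the table, row, and family names, is invented here, and the attributes map is simply left null.

// `handler` is a hypothetical instance of the class defining getVer above.
ByteBuffer tableName = ByteBuffer.wrap(Bytes.toBytes("t1"));
ByteBuffer row = ByteBuffer.wrap(Bytes.toBytes("row1"));
byte[] family = Bytes.toBytes("cf1");

// qualifier == null takes the get.addFamily(family) branch: the whole family.
List<TCell> wholeFamily = handler.getVer(tableName, row, family, null, 3, null);

// A non-null qualifier (even byte[0]) takes the get.addColumn branch instead.
List<TCell> oneColumn = handler.getVer(tableName, row, family, Bytes.toBytes("q1"), 3, null);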
Example 2: getPermissions
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Reads user permission assignments stored in the <code>l:</code> column
 * family of the first table row in <code>_acl_</code>.
 *
 * <p>
 * See {@link AccessControlLists class documentation} for the key structure
 * used for storage.
 * </p>
 */
static ListMultimap<String, TablePermission> getPermissions(Configuration conf,
    byte[] entryName) throws IOException {
  if (entryName == null) entryName = ACL_GLOBAL_NAME;

  // for normal user tables, we just read the table row from _acl_
  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
      Get get = new Get(entryName);
      get.addFamily(ACL_LIST_FAMILY);
      Result row = table.get(get);
      if (!row.isEmpty()) {
        perms = parsePermissions(entryName, row);
      } else {
        LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
            + Bytes.toString(entryName));
      }
    }
  }
  return perms;
}
Example 3: createGet
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
Example 4: setUp
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public void setUp() throws Exception {
  super.setUp();
  row1 = Bytes.toBytes("row1");
  row2 = Bytes.toBytes("row2");
  row3 = Bytes.toBytes("row3");
  fam1 = Bytes.toBytes("fam1");
  fam2 = Bytes.toBytes("fam2");
  col1 = Bytes.toBytes("col1");
  col2 = Bytes.toBytes("col2");
  col3 = Bytes.toBytes("col3");
  col4 = Bytes.toBytes("col4");
  col5 = Bytes.toBytes("col5");
  data = Bytes.toBytes("data");

  // Create Get
  get = new Get(row1);
  get.addFamily(fam1);
  get.addColumn(fam2, col2);
  get.addColumn(fam2, col4);
  get.addColumn(fam2, col5);
  this.scan = new Scan(get);
  rowComparator = KeyValue.COMPARATOR;
}
Example 5: run
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public void run() {
  try {
    Get get = new Get(rowkey);
    get.setFilter(ftlist);
    get.setCacheBlocks(false);
    if (resultColumns != null && resultColumns.length != 0) {
      for (byte[] column : resultColumns) {
        byte[][] tmp = KeyValue.parseColumn(column);
        if (tmp.length == 1) {
          get.addFamily(tmp[0]);
        } else {
          get.addColumn(tmp[0], tmp[1]);
        }
      }
    }
    rsnew = table.get(get);
    table.close();
  } catch (Exception e) {
    rsnew = null;
    exception = e;
  }
}
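The family-versus-column dispatch above hinges on KeyValue.parseColumn, which splits a specifier on the ':' separator: a bare family name yields a single element and maps to addFamily, while "family:qualifier" yields two and maps to addColumn. A small sketch with invented specifiers:

// Invented column specifiers; the row key is illustrative too.
Get get = new Get(Bytes.toBytes("row1"));
for (byte[] column : new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf1:q1") }) {
  byte[][] parts = KeyValue.parseColumn(column);
  if (parts.length == 1) {
    get.addFamily(parts[0]);            // "cf1" -> the whole family
  } else {
    get.addColumn(parts[0], parts[1]);  // "cf1:q1" -> one column
  }
}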
Example 6: getIndexTableGet
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Override
protected Get getIndexTableGet(String line) throws IOException, ParseException {
  MDPoint point = getRecordMDPoint(line);
  byte[] row = mdAdmin.getBucketSuffixRow(point);
  Get get = new Get(row);
  get.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  return get;
}
Example 7: RowResultGenerator
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col : rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case where multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // this helps avoid confusion by leaving a record of what happened here
    // in the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
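Example 7 combines addFamily-style column selection with a time window and a version limit. A compact sketch of those Get options, with invented values (note that setTimeRange declares a checked IOException):

// Narrow a family-wide Get by timestamp window and version count.
Get get = new Get(Bytes.toBytes("row1"));
get.addFamily(Bytes.toBytes("cf1"));
get.setTimeRange(0L, Long.MAX_VALUE); // half-open [min, max) timestamp range
get.setMaxVersions(3);                // at most three versions per column
get.setCacheBlocks(false);            // skip the block cache for one-off reads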
Example 8: testRow
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException, InterruptedException {
  if (opts.randomSleep > 0) {
    Thread.sleep(rd.nextInt(opts.randomSleep));
  }
  Get get = new Get(getRandomRow(this.rand, opts.totalRows));
  if (opts.addColumns) {
    get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    get.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    get.setFilter(new FilterAllFilter());
  }
  get.setConsistency(consistency);
  if (LOG.isTraceEnabled()) LOG.trace(get.toString());
  if (opts.multiGet > 0) {
    this.gets.add(get);
    if (this.gets.size() == opts.multiGet) {
      Result[] rs = this.table.get(this.gets);
      updateValueSize(rs);
      this.gets.clear();
    }
  } else {
    updateValueSize(this.table.get(get));
  }
}
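The multiGet branch above accumulates Gets and issues them as one batch. A minimal sketch of that batching, assuming an already-open Table named table and an invented family "cf1":

// table.get(List<Get>) issues one batched call and returns one Result per Get,
// in request order.
List<Get> gets = new ArrayList<>();
for (String rowKey : new String[] { "row1", "row2", "row3" }) {
  Get g = new Get(Bytes.toBytes(rowKey));
  g.addFamily(Bytes.toBytes("cf1"));
  gets.add(g);
}
Result[] results = table.get(gets);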
Example 9: ensureRowNotReplicated
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException {
  Get get = new Get(row);
  get.addFamily(fam);
  for (Table table : tables) {
    Result res = table.get(get);
    assertEquals(0, res.size());
  }
}
Example 10: deleteAndWaitWithFamily
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
private void deleteAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  del.deleteFamily(fam);
  source.delete(del);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 11: putAndWaitWithFamily
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
private void putAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, val);
  source.put(put);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertEquals(1, res.size());
        assertArrayEquals(val, res.value());
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 12: verifyMerge
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
private void verifyMerge(final HRegion merged, final int upperbound)
    throws IOException {
  // Test
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  InternalScanner scanner = merged.getScanner(scan);
  try {
    List<Cell> testRes = null;
    while (true) {
      testRes = new ArrayList<Cell>();
      boolean hasNext = scanner.next(testRes);
      if (!hasNext) {
        break;
      }
    }
  } finally {
    scanner.close();
  }
  // !Test

  for (int i = 0; i < upperbound; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = merged.get(get);
      assertEquals(1, result.size());
      byte[] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
  }
}
Example 13: testMergeTool
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Test merge tool.
 * @throws Exception
 */
public void testMergeTool() throws Exception {
  // First verify we can read the rows from the source regions and that they
  // contain the right data.
  for (int i = 0; i < regions.length; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = regions[i].get(get);
      byte[] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
    // Close the region and delete the log
    HRegion.closeHRegion(regions[i]);
  }
  WAL log = wals.getWAL(new byte[]{});
  // Merge Region 0 and Region 1
  HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
      this.sourceRegions[0].getRegionNameAsString(),
      this.sourceRegions[1].getRegionNameAsString(), log, 2);
  // Merge the result of merging regions 0 and 1 with region 2
  merged = mergeAndVerify("merging regions 0+1 and 2",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[2].getRegionNameAsString(), log, 3);
  // Merge the result of merging regions 0, 1 and 2 with region 3
  merged = mergeAndVerify("merging regions 0+1+2 and 3",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[3].getRegionNameAsString(), log, 4);
  // Merge the result of merging regions 0, 1, 2 and 3 with region 4
  merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
}
Example 14: testDelete_multiDeleteColumn
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testDelete_multiDeleteColumn() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qual = Bytes.toBytes("qualifier");
  byte[] value = Bytes.toBytes("value");

  Put put = new Put(row1);
  put.add(fam1, qual, 1, value);
  put.add(fam1, qual, 2, value);

  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    region.put(put);

    // We do support deleting more than 1 'latest' version
    Delete delete = new Delete(row1);
    delete.deleteColumn(fam1, qual);
    delete.deleteColumn(fam1, qual);
    region.delete(delete);

    Get get = new Get(row1);
    get.addFamily(fam1);
    Result r = region.get(get);
    assertEquals(0, r.size());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 15: validateData
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
void validateData(Table table, int rownum) throws IOException {
  String row = "row" + String.format("%1$04d", rownum);
  Get get = new Get(Bytes.toBytes(row));
  get.addFamily(HConstants.CATALOG_FAMILY);
  Result result = table.get(get);
  assertTrue(result.size() == 1);
  assertTrue(Bytes.equals(value,
      result.getValue(HConstants.CATALOG_FAMILY, null)));
  LOG.info("Validated row " + row);
}