This article collects typical usage examples of the Java method org.apache.hadoop.hbase.CellUtil.matchingFamily. If you are wondering what CellUtil.matchingFamily does, how to call it, or where to find examples of it, the curated method examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.CellUtil.
Nine code examples of CellUtil.matchingFamily are shown below, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Java code examples.
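Before the examples, here is a minimal, self-contained sketch of the method itself. The class name MatchingFamilyDemo and the concrete row/family/qualifier values are invented for illustration; the point is only that CellUtil.matchingFamily(Cell, byte[]) compares the cell's column-family bytes against the supplied family array.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class MatchingFamilyDemo {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");

    // KeyValue implements Cell, which makes it a convenient way to build a cell by hand.
    Cell cell = new KeyValue(row, cf, Bytes.toBytes("q"), Bytes.toBytes("v"));

    // matchingFamily compares the cell's family bytes with the given family array.
    System.out.println(CellUtil.matchingFamily(cell, cf));                     // true
    System.out.println(CellUtil.matchingFamily(cell, Bytes.toBytes("other"))); // false
  }
}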
Example 1: FSWALEntry
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
FSWALEntry(final long sequence, final WALKey key, final WALEdit edit,
    final HTableDescriptor htd, final HRegionInfo hri, final boolean inMemstore) {
  super(key, edit);
  this.inMemstore = inMemstore;
  this.htd = htd;
  this.hri = hri;
  this.sequence = sequence;
  if (inMemstore) {
    // construct familyNames here to reduce the work of log sinker.
    ArrayList<Cell> cells = this.getEdit().getCells();
    if (CollectionUtils.isEmpty(cells)) {
      this.familyNames = Collections.<byte[]> emptySet();
    } else {
      Set<byte[]> familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
      for (Cell cell : cells) {
        if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
          familySet.add(CellUtil.cloneFamily(cell));
        }
      }
      this.familyNames = Collections.unmodifiableSet(familySet);
    }
  } else {
    this.familyNames = Collections.<byte[]> emptySet();
  }
}
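The interesting part of this constructor is the loop that derives familyNames: every cell's family is collected unless it belongs to the WAL meta family. Below is a standalone sketch of just that filtering step; the METAFAMILY constant is a local stand-in for WALEdit.METAFAMILY, and the class and method names are made up for the demo.

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyNamesSketch {
  // Local stand-in for the WALEdit.METAFAMILY constant used in the example above.
  private static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY");

  // Collect the distinct families touched by a batch of cells, skipping WAL meta edits.
  static Set<byte[]> familiesTouched(List<Cell> cells) {
    Set<byte[]> familySet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (Cell cell : cells) {
      if (!CellUtil.matchingFamily(cell, METAFAMILY)) {
        familySet.add(CellUtil.cloneFamily(cell));
      }
    }
    return familySet;
  }

  public static void main(String[] args) {
    Cell user = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    Cell meta = new KeyValue(Bytes.toBytes("r"), METAFAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println(familiesTouched(Arrays.asList(user, meta)).size()); // 1: only "cf" is kept
  }
}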
Example 2: mapRow
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
@Override
public BaseDataPoint mapRow(Result result, int rowNum) throws Exception {
  if (result.isEmpty()) {
    // return Collections.emptyList();
    return null;
  }
  final byte[] distributedRowKey = result.getRow();
  // List<BaseDataPoint> dataPoints = new ArrayList<>();
  EasyHBaseBo bo = new EasyHBaseBo();
  for (Cell cell : result.rawCells()) {
    if (CellUtil.matchingFamily(cell, HBaseTables.EASYHBASE_CF)) {
      bo.setRowkey(Bytes.toString(CellUtil.cloneRow(cell)));
      bo.setValue(Bytes.toString(CellUtil.cloneValue(cell)));
      bo.setTimestamp(cell.getTimestamp());
      // dataPoints.add(bo);
      // List<T> candidates = new ArrayList<>();
      // for (T candidate : candidates) {
      //   candidate.setRowkey(candidate.getRowkey());
      //   candidate.setValue(candidate.getValue());
      //   candidate.setTimestamp(candidate.getTimestamp());
      //   dataPoints.add(candidate);
      // }
    }
  }
  // Reverse sort as timestamp is stored in a reversed order.
  // Collections.sort(dataPoints, REVERSE_TIMESTAMP_COMPARATOR);
  return bo;
}
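The pattern used here — iterate Result.rawCells() and only map cells whose family matches the table's column family — can be exercised without a cluster, because Result.create lets you build a Result from hand-made cells. A hedged sketch follows; the family names ("d" as a stand-in for HBaseTables.EASYHBASE_CF) and the printed output are invented for the demo.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultFamilyFilterSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    byte[] wantedCf = Bytes.toBytes("d");  // stand-in for HBaseTables.EASYHBASE_CF
    byte[] otherCf = Bytes.toBytes("x");

    Result result = Result.create(new Cell[] {
        new KeyValue(row, wantedCf, Bytes.toBytes("q1"), Bytes.toBytes("v1")),
        new KeyValue(row, otherCf, Bytes.toBytes("q2"), Bytes.toBytes("v2"))
    });

    for (Cell cell : result.rawCells()) {
      // Only cells from the wanted column family are mapped into the value object.
      if (CellUtil.matchingFamily(cell, wantedCf)) {
        System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " -> "
            + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}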
Example 3: filterCellByStore
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
private void filterCellByStore(Entry logEntry) {
  Map<byte[], Long> maxSeqIdInStores =
      regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
  if (maxSeqIdInStores == null || maxSeqIdInStores.isEmpty()) {
    return;
  }
  // Create the array list for the cells that aren't filtered.
  // We make the assumption that most cells will be kept.
  ArrayList<Cell> keptCells = new ArrayList<Cell>(logEntry.getEdit().getCells().size());
  for (Cell cell : logEntry.getEdit().getCells()) {
    if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
      keptCells.add(cell);
    } else {
      byte[] family = CellUtil.cloneFamily(cell);
      Long maxSeqId = maxSeqIdInStores.get(family);
      // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
      // or the master was crashed before and we can not get the information.
      if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getLogSeqNum()) {
        keptCells.add(cell);
      }
    }
  }
  // Anything in the keptCells array list is still live.
  // So rather than removing the cells from the array list
  // which would be an O(n^2) operation, we just replace the list
  logEntry.getEdit().setCells(keptCells);
}
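Stripped of the WAL plumbing, the per-cell decision above is: always keep cells in the meta family; otherwise keep a cell only when no flushed sequence id is recorded for its store, or when the edit is newer than the recorded one. Below is a hedged, standalone version of just that predicate; the class, method, and parameter names are invented, and METAFAMILY again stands in for WALEdit.METAFAMILY.

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterCellByStoreSketch {
  // Stand-in for WALEdit.METAFAMILY.
  private static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY");

  // Keep meta cells unconditionally; keep data cells only when the edit has not already been flushed.
  static boolean keepCell(Cell cell, Map<byte[], Long> maxSeqIdInStores, long editSeqId) {
    if (CellUtil.matchingFamily(cell, METAFAMILY)) {
      return true;
    }
    Long maxFlushedSeqId = maxSeqIdInStores.get(CellUtil.cloneFamily(cell));
    return maxFlushedSeqId == null || maxFlushedSeqId.longValue() < editSeqId;
  }

  public static void main(String[] args) {
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    maxSeqIdInStores.put(Bytes.toBytes("cf"), 10L);

    Cell dataCell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println(keepCell(dataCell, maxSeqIdInStores, 5L));   // false: already flushed
    System.out.println(keepCell(dataCell, maxSeqIdInStores, 20L));  // true: newer than what was flushed
  }
}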
Example 4: postGetOp
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
@Override
public void postGetOp(final ObserverContext<RegionCoprocessorEnvironment> c, final Get get,
    final List<Cell> results) {
  RegionCoprocessorEnvironment e = c.getEnvironment();
  assertNotNull(e);
  assertNotNull(e.getRegion());
  assertNotNull(get);
  assertNotNull(results);
  if (e.getRegion().getTableDesc().getTableName().equals(
      TestRegionObserverInterface.TEST_TABLE)) {
    boolean foundA = false;
    boolean foundB = false;
    boolean foundC = false;
    for (Cell kv : results) {
      if (CellUtil.matchingFamily(kv, TestRegionObserverInterface.A)) {
        foundA = true;
      }
      if (CellUtil.matchingFamily(kv, TestRegionObserverInterface.B)) {
        foundB = true;
      }
      if (CellUtil.matchingFamily(kv, TestRegionObserverInterface.C)) {
        foundC = true;
      }
    }
    assertTrue(foundA);
    assertTrue(foundB);
    assertTrue(foundC);
  }
  ctPostGet.incrementAndGet();
}
Example 5: filterExpectedResults
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
private List<Cell> filterExpectedResults(Set<String> qualSet,
    byte[] startRow, byte[] endRow, int maxVersions) {
  final List<Cell> filteredKVs = new ArrayList<Cell>();
  final Map<String, Integer> verCount = new HashMap<String, Integer>();
  for (Cell kv : expectedKVs) {
    if (startRow.length > 0 &&
        Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
            startRow, 0, startRow.length) < 0) {
      continue;
    }
    // In this unit test the end row is always inclusive.
    if (endRow.length > 0 &&
        Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
            endRow, 0, endRow.length) > 0) {
      continue;
    }
    if (!qualSet.isEmpty() && (!CellUtil.matchingFamily(kv, FAMILY_BYTES)
        || !qualSet.contains(Bytes.toString(CellUtil.cloneQualifier(kv))))) {
      continue;
    }
    final String rowColStr =
        Bytes.toStringBinary(CellUtil.cloneRow(kv)) + "/"
            + Bytes.toStringBinary(CellUtil.cloneFamily(kv)) + ":"
            + Bytes.toStringBinary(CellUtil.cloneQualifier(kv));
    final Integer curNumVer = verCount.get(rowColStr);
    final int newNumVer = curNumVer != null ? (curNumVer + 1) : 1;
    if (newNumVer <= maxVersions) {
      filteredKVs.add(kv);
      verCount.put(rowColStr, newNumVer);
    }
  }
  return filteredKVs;
}
Example 6: hasColumn
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
private boolean hasColumn(final List<Cell> kvs, final byte [] family,
    final byte [] qualifier) {
  for (Cell kv : kvs) {
    if (CellUtil.matchingFamily(kv, family) && CellUtil.matchingQualifier(kv, qualifier)) {
      return true;
    }
  }
  return false;
}
Example 7: getColumn
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
private Cell getColumn(final List<Cell> kvs, final byte [] family,
    final byte [] qualifier) {
  for (Cell kv : kvs) {
    if (CellUtil.matchingFamily(kv, family) && CellUtil.matchingQualifier(kv, qualifier)) {
      return kv;
    }
  }
  return null;
}
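Examples 6 and 7 are the usual "find a cell by family and qualifier" helpers: matchingFamily and matchingQualifier together identify the column. The following hedged sketch exercises the same pattern end to end with hand-built cells; the class name, helper name, and data are invented for illustration.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class FindColumnSketch {
  // Same shape as getColumn in example 7: return the first cell for family:qualifier, or null.
  static Cell findColumn(List<Cell> kvs, byte[] family, byte[] qualifier) {
    for (Cell kv : kvs) {
      if (CellUtil.matchingFamily(kv, family) && CellUtil.matchingQualifier(kv, qualifier)) {
        return kv;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("r");
    List<Cell> kvs = Arrays.asList(
        (Cell) new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("1")),
        (Cell) new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("b"), Bytes.toBytes("2")));

    Cell hit = findColumn(kvs, Bytes.toBytes("cf"), Bytes.toBytes("b"));
    System.out.println(hit == null ? "not found" : Bytes.toString(CellUtil.cloneValue(hit)));  // prints 2
  }
}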
Example 8: isMetaEditFamily
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
public static boolean isMetaEditFamily(Cell cell) {
  return CellUtil.matchingFamily(cell, METAFAMILY);
}
Example 9: verifyAllEditsMadeItIn
import org.apache.hadoop.hbase.CellUtil; // import the package/class the method depends on
/**
 * @param fs
 * @param conf
 * @param edits
 * @param region
 * @return Return how many edits seen.
 * @throws IOException
 */
private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
    final Path edits, final HRegion region) throws IOException {
  int count = 0;
  // Based on HRegion#replayRecoveredEdits
  WAL.Reader reader = null;
  try {
    reader = WALFactory.createReader(fs, edits, conf);
    WAL.Entry entry;
    while ((entry = reader.next()) != null) {
      WALKey key = entry.getKey();
      WALEdit val = entry.getEdit();
      count++;
      // Check this edit is for this region.
      if (!Bytes.equals(key.getEncodedRegionName(),
          region.getRegionInfo().getEncodedNameAsBytes())) {
        continue;
      }
      Cell previous = null;
      for (Cell cell : val.getCells()) {
        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue;
        if (previous != null && CellComparator.compareRows(previous, cell) == 0) continue;
        previous = cell;
        Get g = new Get(CellUtil.cloneRow(cell));
        Result r = region.get(g);
        boolean found = false;
        for (CellScanner scanner = r.cellScanner(); scanner.advance();) {
          Cell current = scanner.current();
          if (CellComparator.compare(cell, current, true) == 0) {
            found = true;
            break;
          }
        }
        assertTrue("Failed to find " + cell, found);
      }
    }
  } finally {
    if (reader != null) reader.close();
  }
  return count;
}