This article collects typical usage examples of the Java class org.apache.hadoop.hbase.CellUtil: what the class is for, how it is used, and working code that exercises it. If you are wrestling with CellUtil, the curated examples below should help.
The CellUtil class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity.
Example 1: recoverClusteringResult
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
public static List<Cell> recoverClusteringResult(List<Cell> cells, byte[] family,
byte[] qualifier) {
if (cells == null || cells.size() == 0) return cells;
byte[][] indexColumn = IndexPutParser.parseIndexRowKey(CellUtil.cloneRow(cells.get(0)));
List<Cell> list = new ArrayList<>(cells.size() + 1);
for (Cell cell : cells) {
byte[] tag = cell.getTagsArray();
// drop the tags if they cannot be re-encoded: the tags block is bounded by KeyValue.MAX_TAGS_LENGTH
if (tag != null && tag.length > KeyValue.MAX_TAGS_LENGTH) tag = null;
KeyValue kv =
new KeyValue(indexColumn[0], CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
CellUtil.cloneValue(cell), tag);
list.add(kv);
}
list.add(new KeyValue(indexColumn[0], family, qualifier, indexColumn[1]));
Collections.sort(list, KeyValue.COMPARATOR);
return list;
}
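As an aside (not part of the original project), the CellUtil.clone* helpers used above each allocate a fresh byte[] holding just one component of the cell, so the copies stay valid even if the cell's backing buffer is reused. A minimal standalone sketch with made-up data; the class name CloneDemo is ours:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CloneDemo {
  public static void main(String[] args) {
    // KeyValue is the classic concrete Cell implementation.
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v1"));
    byte[] family = CellUtil.cloneFamily(cell);       // fresh copy of "cf"
    byte[] qualifier = CellUtil.cloneQualifier(cell); // fresh copy of "q"
    byte[] value = CellUtil.cloneValue(cell);         // fresh copy of "v1"
    System.out.println(Bytes.toString(family) + ":" + Bytes.toString(qualifier)
        + " = " + Bytes.toString(value)); // prints cf:q = v1
  }
}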
Example 2: resultFromHBase
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
/**
* Creates a {@link TResult} (Thrift) from a {@link Result} (HBase).
*
* @param in the <code>Result</code> to convert; must not be <code>null</code>
*
* @return the converted <code>TResult</code>
*/
public static TResult resultFromHBase(Result in) {
Cell[] raw = in.rawCells();
TResult out = new TResult();
byte[] row = in.getRow();
if (row != null) {
  out.setRow(row); // reuse the row bytes already fetched above
}
List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
for (Cell kv : raw) {
TColumnValue col = new TColumnValue();
col.setFamily(CellUtil.cloneFamily(kv));
col.setQualifier(CellUtil.cloneQualifier(kv));
col.setTimestamp(kv.getTimestamp());
col.setValue(CellUtil.cloneValue(kv));
if (kv.getTagsLength() > 0) {
col.setTags(CellUtil.getTagArray(kv));
}
columnValues.add(col);
}
out.setColumnValues(columnValues);
return out;
}
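To see the input side of this conversion in isolation, here is a standalone sketch (made-up data; the class name RawCellsDemo is ours) that builds a Result from cells and walks result.rawCells() the same way resultFromHBase does:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class RawCellsDemo {
  public static void main(String[] args) {
    List<Cell> cells = new ArrayList<Cell>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v")));
    Result result = Result.create(cells); // wrap the cells in an HBase Result
    for (Cell kv : result.rawCells()) {
      System.out.println(Bytes.toString(CellUtil.cloneFamily(kv)) + ":"
          + Bytes.toString(CellUtil.cloneQualifier(kv)) + " = "
          + Bytes.toString(CellUtil.cloneValue(kv))); // prints cf:q = v
    }
  }
}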
Example 3: binarySearch
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
protected int binarySearch(final Cell [] kvs,
final byte [] family,
final byte [] qualifier) {
Cell searchTerm =
KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kvs[0]),
family, qualifier);
// pos == (-(insertion point) - 1)
int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
// there will never be an exact match: searchTerm sorts before any real cell
if (pos < 0) {
  pos = -(pos + 1);
  // pos is now the insertion point
}
if (pos == kvs.length) {
return -1; // doesn't exist
}
return pos;
}
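Arrays.binarySearch returns (-(insertion point) - 1) when the probe is absent, and createFirstOnRow builds a fake cell (latest timestamp, Type.Maximum) that sorts immediately before any real cell with the same row, family, and qualifier, which is why an exact match can never occur. A standalone sketch of the same trick with made-up data (the class name BinarySearchDemo is ours):

import java.util.Arrays;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class BinarySearchDemo {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("r1");
    Cell[] kvs = new Cell[] {
        new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("1")),
        new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("b"), Bytes.toBytes("2")),
    };
    // Probe key that sorts just before the real cf:b cell on this row.
    Cell probe = KeyValueUtil.createFirstOnRow(row, Bytes.toBytes("cf"), Bytes.toBytes("b"));
    int pos = Arrays.binarySearch(kvs, probe, KeyValue.COMPARATOR);
    System.out.println(pos);        // -2: no exact match
    System.out.println(-(pos + 1)); // 1: index of the cf:b cell
  }
}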
Example 4: extractAndPartitionTags
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
/**
* Extracts and partitions the visibility tags and non-visibility tags
*
* @param cell - the cell for which we would extract and partition the
* visibility and non-visibility tags
* @param visTags
* - all the visibility tags of type TagType.VISIBILITY_TAG_TYPE would
* be added to this list
* @param nonVisTags - all the non-visibility tags would be added to this list
* @return - the serialization format of the tag. Can be null if no tags are found or
* if there is no visibility tag found
*/
public static Byte extractAndPartitionTags(Cell cell, List<Tag> visTags,
List<Tag> nonVisTags) {
Byte serializationFormat = null;
if (cell.getTagsLength() > 0) {
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) {
serializationFormat = tag.getBuffer()[tag.getTagOffset()];
} else if (tag.getType() == VISIBILITY_TAG_TYPE) {
visTags.add(tag);
} else {
// ignore string encoded visibility expressions, will be added in replication handling
nonVisTags.add(tag);
}
}
}
return serializationFormat;
}
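For context, here is a standalone sketch (made-up tag type and data, HBase 1.x Tag API; the class name TagsDemo is ours) showing how tags attached to a KeyValue are walked with CellUtil.tagsIterator, the same call the method above uses:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.util.Bytes;

public class TagsDemo {
  public static void main(String[] args) {
    List<Tag> tags = new ArrayList<Tag>();
    tags.add(new Tag((byte) 7, Bytes.toBytes("demo-tag"))); // tag type 7 is arbitrary here
    Cell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), System.currentTimeMillis(), Bytes.toBytes("v"), tags);
    Iterator<Tag> it = CellUtil.tagsIterator(cell.getTagsArray(),
        cell.getTagsOffset(), cell.getTagsLength());
    while (it.hasNext()) {
      Tag t = it.next();
      System.out.println(t.getType() + " -> "
          + Bytes.toString(t.getBuffer(), t.getTagOffset(), t.getTagLength()));
    }
  }
}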
Example 5: verifyData
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
throws IOException {
for (int i = startRow; i < startRow + numRows; i++) {
byte[] row = Bytes.toBytes("" + i);
Get get = new Get(row);
for (byte[] family : families) {
get.addColumn(family, qf);
}
Result result = newReg.get(get);
Cell[] raw = result.rawCells();
assertEquals(families.length, result.size());
for (int j = 0; j < families.length; j++) {
assertTrue(CellUtil.matchingRow(raw[j], row));
assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
assertTrue(CellUtil.matchingQualifier(raw[j], qf));
}
}
}
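The CellUtil.matching* helpers compare one component of a cell against a byte[] without materializing a copy, which keeps assertions like the ones above cheap. A minimal standalone sketch with made-up data (the class name MatchingDemo is ours):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class MatchingDemo {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("row-7"), Bytes.toBytes("cf"),
        Bytes.toBytes("qf"), Bytes.toBytes("v"));
    // Each helper compares bytes in place, no cloning involved.
    System.out.println(CellUtil.matchingRow(cell, Bytes.toBytes("row-7")));   // true
    System.out.println(CellUtil.matchingFamily(cell, Bytes.toBytes("cf")));   // true
    System.out.println(CellUtil.matchingQualifier(cell, Bytes.toBytes("x"))); // false
  }
}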
Example 6: toStringMap
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
private static Map<String, Object> toStringMap(Cell cell) {
Map<String, Object> stringMap = new HashMap<String, Object>();
stringMap.put("row",
Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength()));
stringMap.put("qualifier",
Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()));
stringMap.put("timestamp", cell.getTimestamp());
stringMap.put("vlen", cell.getValueLength());
if (cell.getTagsLength() > 0) {
List<String> tagsString = new ArrayList<String>();
Iterator<Tag> tagsIterator = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
cell.getTagsLength());
while (tagsIterator.hasNext()) {
Tag tag = tagsIterator.next();
tagsString.add(tag.getType() + ":"
    + Bytes.toStringBinary(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength()));
}
stringMap.put("tag", tagsString);
}
return stringMap;
}
Example 7: append
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
@Override
public long append(HTableDescriptor htd, HRegionInfo info, WALKey key, WALEdit edits,
boolean inMemstore) {
if (!this.listeners.isEmpty()) {
final long start = System.nanoTime();
long len = 0;
for (Cell cell : edits.getCells()) {
len += CellUtil.estimatedSerializedSizeOf(cell);
}
final long elapsed = (System.nanoTime() - start) / 1000000L; // nanoseconds to milliseconds
for (WALActionsListener listener : this.listeners) {
listener.postAppend(len, elapsed);
}
}
return -1;
}
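CellUtil.estimatedSerializedSizeOf approximates the number of bytes a cell occupies on the wire (roughly the KeyValue layout plus a length prefix), which is what makes it a reasonable proxy for WAL append size here. A standalone sketch with made-up data (the class name SizeDemo is ours):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class SizeDemo {
  public static void main(String[] args) {
    List<Cell> cells = Arrays.<Cell>asList(
        new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
        new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2")));
    long len = 0;
    for (Cell cell : cells) {
      len += CellUtil.estimatedSerializedSizeOf(cell); // accumulate like the append() above
    }
    System.out.println("estimated batch size: " + len + " bytes");
  }
}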
Example 8: countDeleteMarkers
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
private int countDeleteMarkers(Region region) throws IOException {
Scan s = new Scan();
s.setRaw(true);
// use max versions from the store(s)
s.setMaxVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions());
InternalScanner scan = region.getScanner(s);
List<Cell> kvs = new ArrayList<Cell>();
int res = 0;
boolean hasMore;
do {
hasMore = scan.next(kvs);
for (Cell kv : kvs) {
if (CellUtil.isDelete(kv)) res++;
}
kvs.clear();
} while (hasMore);
scan.close();
return res;
}
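CellUtil.isDelete returns true for every delete-type cell (Delete, DeleteColumn, DeleteFamily, and so on), which is what lets the raw scan above count tombstones. A minimal standalone sketch with made-up data (the class name DeleteMarkerDemo is ours):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteMarkerDemo {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    Cell put = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), now, KeyValue.Type.Put, Bytes.toBytes("v"));
    Cell del = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), now, KeyValue.Type.Delete); // a delete marker, no value
    System.out.println(CellUtil.isDelete(put)); // false
    System.out.println(CellUtil.isDelete(del)); // true
  }
}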
Example 9: next
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
@Override
public Result next() throws IOException {
values.clear();
scanner.nextRaw(values);
if (values.isEmpty()) {
//we are done
return null;
}
Result result = Result.create(values);
if (this.scanMetrics != null) {
long resultSize = 0;
for (Cell cell : values) {
resultSize += CellUtil.estimatedSerializedSizeOf(cell);
}
this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
}
return result;
}
Example 10: list
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
NavigableSet<NamespaceDescriptor> ret =
Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
ResultScanner scanner = getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
try {
for (Result r : scanner) {
  Cell cell = r.getColumnLatestCell(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
      HTableDescriptor.NAMESPACE_COL_DESC_BYTES);
  // getColumnLatestCell returns null when the column is absent; skip malformed rows
  if (cell == null) continue;
  byte[] val = CellUtil.cloneValue(cell);
  ret.add(ProtobufUtil.toNamespaceDescriptor(
      HBaseProtos.NamespaceDescriptor.parseFrom(val)));
}
} finally {
scanner.close();
}
return ret;
}
Example 11: testWhileMatchFilterWithFilterRowKey
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
/**
* Tests the the {@link WhileMatchFilter} works in combination with a
* {@link Filter} that uses the
* {@link Filter#filterRowKey(byte[], int, int)} method.
*
* See HBASE-2258.
*
* @throws Exception
*/
@Test
public void testWhileMatchFilterWithFilterRowKey() throws Exception {
Scan s = new Scan();
String prefix = "testRowOne";
WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(Bytes.toBytes(prefix)));
s.setFilter(filter);
InternalScanner scanner = this.region.getScanner(s);
while (true) {
ArrayList<Cell> values = new ArrayList<Cell>();
boolean isMoreResults = scanner.next(values);
if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) {
assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
}
if (!isMoreResults) {
break;
}
}
}
Example 12: backwardSeek
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
@Override
public boolean backwardSeek(Cell seekKey) throws IOException {
if (current == null) {
return false;
}
heap.add(current);
current = null;
KeyValueScanner scanner;
while ((scanner = heap.poll()) != null) {
Cell topKey = scanner.peek();
if ((CellUtil.matchingRow(seekKey, topKey) && comparator
.getComparator().compare(seekKey, topKey) <= 0)
|| comparator.getComparator().compareRows(seekKey, topKey) > 0) {
heap.add(scanner);
current = pollRealKV();
return current != null;
}
if (!scanner.backwardSeek(seekKey)) {
scanner.close();
} else {
heap.add(scanner);
}
}
return false;
}
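The overload CellUtil.matchingRow(Cell, Cell) used above compares only the row components of two cells, ignoring family, qualifier, timestamp, and type. A minimal standalone sketch with made-up data (the class name MatchingRowDemo is ours):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class MatchingRowDemo {
  public static void main(String[] args) {
    Cell a = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Bytes.toBytes("v"));
    Cell b = new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q2"), Bytes.toBytes("v"));
    Cell c = new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Bytes.toBytes("v"));
    System.out.println(CellUtil.matchingRow(a, b)); // true: same row, different qualifier
    System.out.println(CellUtil.matchingRow(a, c)); // false: different rows
  }
}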
Example 13: FSWALEntry
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
FSWALEntry(final long sequence, final WALKey key, final WALEdit edit,
final HTableDescriptor htd, final HRegionInfo hri, final boolean inMemstore) {
super(key, edit);
this.inMemstore = inMemstore;
this.htd = htd;
this.hri = hri;
this.sequence = sequence;
if (inMemstore) {
// construct familyNames here to reduce the work of log sinker.
ArrayList<Cell> cells = this.getEdit().getCells();
if (CollectionUtils.isEmpty(cells)) {
this.familyNames = Collections.<byte[]> emptySet();
} else {
Set<byte[]> familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
for (Cell cell : cells) {
if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
familySet.add(CellUtil.cloneFamily(cell));
}
}
this.familyNames = Collections.unmodifiableSet(familySet);
}
} else {
this.familyNames = Collections.<byte[]> emptySet();
}
}
Example 14: stampRegionSequenceId
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
/**
* Here is where a WAL edit gets its sequenceid.
* @return The sequenceid we stamped on this edit.
* @throws IOException
*/
long stampRegionSequenceId() throws IOException {
long regionSequenceId = WALKey.NO_SEQUENCE_ID;
MultiVersionConcurrencyControl mvcc = getKey().getMvcc();
MultiVersionConcurrencyControl.WriteEntry we = null;
if (mvcc != null) {
we = mvcc.begin();
regionSequenceId = we.getWriteNumber();
}
if (!this.getEdit().isReplay() && inMemstore) {
for (Cell c : getEdit().getCells()) {
CellUtil.setSequenceId(c, regionSequenceId);
}
}
// This has to stay in this order
WALKey key = getKey();
key.setLogSeqNum(regionSequenceId);
key.setWriteEntry(we);
return regionSequenceId;
}
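CellUtil.setSequenceId stamps the id into any cell whose implementation supports a settable sequence id (KeyValue does) and throws IOException otherwise. A minimal standalone sketch with made-up data (the class name SequenceIdDemo is ours):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class SequenceIdDemo {
  public static void main(String[] args) throws IOException {
    Cell cell = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Throws IOException if the cell type cannot carry a sequence id.
    CellUtil.setSequenceId(cell, 42L);
    System.out.println(cell.getSequenceId()); // 42
  }
}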
Example 15: testRpcScheduler
import org.apache.hadoop.hbase.CellUtil; // import the required package/class
@Ignore
@Test
public void testRpcScheduler() throws IOException, InterruptedException {
PriorityFunction qosFunction = mock(PriorityFunction.class);
Abortable abortable = new AbortServer();
RpcScheduler scheduler = new SimpleRpcScheduler(CONF, 2, 0, 0, qosFunction, abortable, 0);
RpcServer rpcServer = new TestRpcServer(scheduler);
RpcClientImpl client = new RpcClientImpl(CONF, HConstants.CLUSTER_ID_DEFAULT);
try {
rpcServer.start();
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
PayloadCarryingRpcController controller =
new PayloadCarryingRpcController(CellUtil.createCellScanner(ImmutableList.of(CELL)));
InetSocketAddress address = rpcServer.getListenerAddress();
if (address == null) {
throw new IOException("Listener channel is closed");
}
client.call(controller, md, param, md.getOutputType().toProto(), User.getCurrent(),
address, new MetricsConnection.CallStats());
} catch (Throwable e) {
assertTrue(abortable.isAborted());
} finally {
rpcServer.stop();
}
}
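CellUtil.createCellScanner wraps a cell collection in the CellScanner interface that the RPC layer consumes; the PayloadCarryingRpcController above carries its CELL payload that way. A standalone sketch with made-up data (the class name CellScannerDemo is ours):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CellScannerDemo {
  public static void main(String[] args) throws IOException {
    List<Cell> cells = Arrays.<Cell>asList(
        new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
        new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2")));
    CellScanner scanner = CellUtil.createCellScanner(cells);
    while (scanner.advance()) { // position on the next cell; false when exhausted
      Cell c = scanner.current();
      System.out.println(Bytes.toString(CellUtil.cloneRow(c))); // prints r1, then r2
    }
  }
}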