本文整理汇总了Java中org.apache.hadoop.hbase.util.ByteRange类的典型用法代码示例。如果您正苦于以下问题:Java ByteRange类的具体用法?Java ByteRange怎么用?Java ByteRange使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ByteRange类属于org.apache.hadoop.hbase.util包,在下文中一共展示了ByteRange类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testLABRandomAllocation
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Stress the memstore LAB with many random-sized allocations and verify that
 * consecutive allocations from the same backing chunk are laid out contiguously.
 */
@Test
public void testLABRandomAllocation() {
MemStoreLAB mslab = new HeapMemStoreLAB();
Random random = new Random();
byte[] currentChunk = null;
int offsetInChunk = 0;
// 100K iterations by 0-1K alloc -> 50MB expected
// should be reasonable for unit test and also cover wraparound
// behavior
for (int iteration = 0; iteration < 100000; iteration++) {
int allocSize = random.nextInt(1000);
ByteRange range = mslab.allocateBytes(allocSize);
byte[] backing = range.getBytes();
if (backing != currentChunk) {
// The LAB moved on to a new chunk; offsets restart at zero.
currentChunk = backing;
offsetInChunk = 0;
}
assertEquals(offsetInChunk, range.getOffset());
assertTrue("Allocation overruns buffer",
range.getOffset() + allocSize <= backing.length);
offsetInChunk += allocSize;
}
}
示例2: split
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Called when we need to convert a leaf node into a branch with 2 leaves. Comments inside the
 * method assume we have token BAA starting at tokenStartOffset=0 and are adding BOO. The output
 * will be 3 nodes:<br>
 * <ul>
 * <li>1: B <- branch
 * <li>2: AA <- leaf
 * <li>3: OO <- leaf
 * </ul>
 *
 * @param numTokenBytesToRetain => 1 (the B)
 * @param bytes => BOO
 */
protected void split(int numTokenBytesToRetain, final ByteRange bytes) {
int childNodeDepth = nodeDepth;
int childTokenStartOffset = tokenStartOffset + numTokenBytesToRetain;
//create leaf AA holding the retained-token suffix of the current node
TokenizerNode firstChild = builder.addNode(this, childNodeDepth, childTokenStartOffset,
token, numTokenBytesToRetain);
firstChild.setNumOccurrences(numOccurrences);// do before clearing this node's numOccurrences
token.setLength(numTokenBytesToRetain);//shorten current token from BAA to B
numOccurrences = 0;//current node is now a branch
moveChildrenToDifferentParent(firstChild);//point the new leaf (AA) to the new branch (B)
addChild(firstChild);//add the new leaf (AA) to the branch's (B's) children
//create leaf OO from the non-matching suffix of the incoming bytes
TokenizerNode secondChild = builder.addNode(this, childNodeDepth, childTokenStartOffset,
bytes, tokenStartOffset + numTokenBytesToRetain);
addChild(secondChild);//add the new leaf (OO) to the branch's (B's) children
// we inserted branch node B as a new level above/before the two children, so increment the
// depths of the children below
firstChild.incrementNodeDepthRecursively();
secondChild.incrementNodeDepthRecursively();
}
示例3: addNode
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Obtain a TokenizerNode for the given token slice, either recycling a
 * previously allocated node from the cache or constructing a fresh one.
 */
protected TokenizerNode addNode(TokenizerNode parent, int nodeDepth, int tokenStartOffset,
final ByteRange token, int inputTokenOffset) {
int inputTokenLength = token.getLength() - inputTokenOffset;
// Copy the token bytes into this builder's storage; tokenOffset is their new position.
int tokenOffset = appendTokenAndRepointByteRange(token, inputTokenOffset);
TokenizerNode node;
if (numNodes < nodes.size()) {
// A previously allocated node is available for reuse.
node = nodes.get(numNodes);
node.reset();
node.reconstruct(this, parent, nodeDepth, tokenStartOffset, tokenOffset, inputTokenLength);
} else {
node = new TokenizerNode(this, parent, nodeDepth, tokenStartOffset, tokenOffset,
inputTokenLength);
nodes.add(node);
}
++numNodes;
return node;
}
示例4: store
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Copies {@code bytes} into the shared backing array, records it as a new
 * unique range, and returns the index assigned to it.
 */
protected int store(ByteRange bytes) {
int newIndex = numUniqueRanges;
// Grow the pool of reusable range objects if we have run out.
if (uniqueRanges.size() <= numUniqueRanges) {
uniqueRanges.add(new SimpleMutableByteRange());
}
ByteRange target = uniqueRanges.get(numUniqueRanges);
// Make sure the shared byte array can hold the incoming bytes (doubles on growth).
int requiredCapacity = numBytes + bytes.getLength();
byteAppender = ArrayUtils.growIfNecessary(byteAppender, requiredCapacity, 2 * requiredCapacity);
bytes.deepCopyTo(byteAppender, numBytes);
target.set(byteAppender, numBytes, bytes.getLength());// this isn't valid yet
numBytes += bytes.getLength();
uniqueIndexByUniqueRange.put(target, newIndex);
++numUniqueRanges;
return newIndex;
}
示例5: toString
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/***************** standard methods ************************/
/**
 * Renders one line per stored range (index + binary-escaped bytes), followed by
 * the total and average sizes.
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
int i = 0;
for (ByteRange r : sortedRanges) {
if (i > 0) {
sb.append("\n");
}
// Chained appends avoid allocating an intermediate concatenated String per row.
sb.append(i).append(' ').append(Bytes.toStringBinary(r.deepCopyToNewArray()));
++i;
}
sb.append("\ntotalSize:").append(numBytes);
sb.append("\navgSize:").append(getAvgSize());
return sb.toString();
}
示例6: split
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Called when we need to convert a leaf node into a branch with 2 leaves. Comments inside the
 * method assume we have token BAA starting at tokenStartOffset=0 and are adding BOO. The output
 * will be 3 nodes:<br/>
 * <li>1: B <- branch
 * <li>2: AA <- leaf
 * <li>3: OO <- leaf
 *
 * @param numTokenBytesToRetain => 1 (the B)
 * @param bytes => BOO
 */
protected void split(int numTokenBytesToRetain, final ByteRange bytes) {
int childNodeDepth = nodeDepth;
int childTokenStartOffset = tokenStartOffset + numTokenBytesToRetain;
//create leaf AA holding the retained-token suffix of the current node
TokenizerNode firstChild = builder.addNode(this, childNodeDepth, childTokenStartOffset,
token, numTokenBytesToRetain);
firstChild.setNumOccurrences(numOccurrences);// do before clearing this node's numOccurrences
token.setLength(numTokenBytesToRetain);//shorten current token from BAA to B
numOccurrences = 0;//current node is now a branch
moveChildrenToDifferentParent(firstChild);//point the new leaf (AA) to the new branch (B)
addChild(firstChild);//add the new leaf (AA) to the branch's (B's) children
//create leaf OO from the non-matching suffix of the incoming bytes
TokenizerNode secondChild = builder.addNode(this, childNodeDepth, childTokenStartOffset,
bytes, tokenStartOffset + numTokenBytesToRetain);
addChild(secondChild);//add the new leaf (OO) to the branch's (B's) children
// we inserted branch node B as a new level above/before the two children, so increment the
// depths of the children below
firstChild.incrementNodeDepthRecursively();
secondChild.incrementNodeDepthRecursively();
}
示例7: store
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Copies {@code bytes} into the shared backing array, records it as a new
 * unique range, and returns the index assigned to it.
 */
protected int store(ByteRange bytes) {
int newIndex = numUniqueRanges;
// Grow the pool of reusable range objects if we have run out.
if (uniqueRanges.size() <= numUniqueRanges) {
uniqueRanges.add(new SimpleByteRange());
}
ByteRange target = uniqueRanges.get(numUniqueRanges);
// Make sure the shared byte array can hold the incoming bytes (doubles on growth).
int requiredCapacity = numBytes + bytes.getLength();
byteAppender = ArrayUtils.growIfNecessary(byteAppender, requiredCapacity, 2 * requiredCapacity);
bytes.deepCopyTo(byteAppender, numBytes);
target.set(byteAppender, numBytes, bytes.getLength());// this isn't valid yet
numBytes += bytes.getLength();
uniqueIndexByUniqueRange.put(target, newIndex);
++numUniqueRanges;
return newIndex;
}
示例8: VisibilityLabelFilter
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * @param expEvaluator evaluates visibility expressions against the user's authorizations
 * @param cfVsMaxVersions max-versions setting keyed by column family
 */
public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator,
Map<ByteRange, Integer> cfVsMaxVersions) {
// Mutable ranges for the family/qualifier under inspection; start out empty.
// NOTE(review): presumably repointed per-cell during filtering — confirm in filter methods.
this.curFamily = new SimpleMutableByteRange();
this.curQualifier = new SimpleMutableByteRange();
this.expEvaluator = expEvaluator;
this.cfVsMaxVersions = cfVsMaxVersions;
}
示例9: createVisibilityLabelFilter
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Builds a VisibilityLabelFilter for the given region, recording each column
 * family's configured max-versions from the table descriptor.
 *
 * @param region region whose table schema supplies the per-family max versions
 * @param authorizations the user's visibility labels
 * @return a filter enforcing visibility labels for this scan
 * @throws IOException propagated from the visibility label service lookup
 */
public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations)
throws IOException {
// Diamond operator: the type arguments are inferred from the declaration.
Map<ByteRange, Integer> cfVsMaxVersions = new HashMap<>();
for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions());
}
VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance()
.getVisibilityLabelService();
// Return directly; the intermediate local added nothing.
return new VisibilityLabelFilter(vls.getVisibilityExpEvaluator(authorizations),
cfVsMaxVersions);
}
示例10: AccessControlFilter
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * @param mgr table permission manager
 * @param ugi user whose access is being checked
 * @param tableName table being scanned
 * @param strategy filtering strategy to apply
 * @param cfVsMaxVersions max-versions setting keyed by column family
 */
AccessControlFilter(TableAuthManager mgr, User ugi, TableName tableName,
Strategy strategy, Map<ByteRange, Integer> cfVsMaxVersions) {
// Qualify every field assignment with `this.` for consistency.
this.authManager = mgr;
this.user = ugi;
this.table = tableName;
this.isSystemTable = tableName.isSystemTable();
this.strategy = strategy;
this.cfVsMaxVersions = cfVsMaxVersions;
// NOTE(review): prevFam/prevQual presumably track the previously seen
// family/qualifier across cells — confirm against the filter methods.
this.prevFam = new SimpleMutableByteRange();
this.prevQual = new SimpleMutableByteRange();
}
示例11: allocateBytes
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Allocate a slice of the given length.
 *
 * If the size is larger than the maximum size specified for this
 * allocator, returns null.
 */
@Override
public ByteRange allocateBytes(int size) {
Preconditions.checkArgument(size >= 0, "negative size");
// Callers should satisfy large allocations directly from JVM since they
// don't cause fragmentation as badly.
if (size > maxAlloc) {
return null;
}
// Keep trying chunks until one has room for the request.
for (;;) {
Chunk chunk = getOrMakeChunk();
int offset = chunk.alloc(size);
if (offset == -1) {
// Not enough space left in this chunk: try to retire it and loop
// around for a fresh one.
tryRetireChunk(chunk);
continue;
}
// Common case: a small allocation carved out of the current big chunk.
return new SimpleMutableByteRange(chunk.data, offset, size);
}
}
示例12: testLABLargeAllocation
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
@Test
public void testLABLargeAllocation() {
MemStoreLAB mslab = new HeapMemStoreLAB();
// A 2MB request exceeds the LAB's maximum allocation size.
final int twoMegabytes = 2 * 1024 * 1024;
ByteRange alloc = mslab.allocateBytes(twoMegabytes);
assertNull("2MB allocation shouldn't be satisfied by LAB.", alloc);
}
示例13: testReusingChunks
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
@Test
public void testReusingChunks() {
MemStoreLAB mslab = new HeapMemStoreLAB(conf);
Random random = new Random();
byte[] currentChunk = null;
int offsetInChunk = 0;
// Perform a batch of random-sized allocations, checking offsets as we go.
for (int i = 0; i < 100; i++) {
int allocSize = random.nextInt(1000);
ByteRange range = mslab.allocateBytes(allocSize);
if (range.getBytes() != currentChunk) {
// New backing chunk: offsets restart at zero.
currentChunk = range.getBytes();
offsetInChunk = 0;
}
assertEquals(offsetInChunk, range.getOffset());
assertTrue("Allocation overruns buffer",
range.getOffset() + allocSize <= range.getBytes().length);
offsetInChunk += allocSize;
}
// Closing the LAB returns its chunks to the shared pool.
mslab.close();
int pooledChunks = chunkPool.getPoolSize();
assertTrue(pooledChunks > 0);
// A freshly constructed LAB should draw its first chunk from the pool,
// shrinking the pool by one.
mslab = new HeapMemStoreLAB(conf);
mslab.allocateBytes(1000);
assertEquals(pooledChunks - 1, chunkPool.getPoolSize());
}
示例14: addAll
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/***************** building *************************/
/**
 * Adds every range from the (already sorted) list, in order.
 *
 * @param sortedByteRanges ranges in the collection's expected sort order
 */
public void addAll(ArrayList<ByteRange> sortedByteRanges) {
// Enhanced for: the index was only used for element access, and the
// enhanced loop preserves iteration order.
for (ByteRange byteRange : sortedByteRanges) {
addSorted(byteRange);
}
}
示例15: addSorted
import org.apache.hadoop.hbase.util.ByteRange; //导入依赖的package包/类
/**
 * Appends one range (callers supply them in sorted order), creating the root
 * node on the first insertion and delegating to it thereafter.
 */
public void addSorted(final ByteRange bytes) {
++numArraysAdded;
// Track the longest element seen so far.
maxElementLength = Math.max(maxElementLength, bytes.getLength());
if (root != null) {
root.addSorted(bytes);
} else {
// nodeDepth of firstNode (non-root) is 1
root = addNode(null, 1, 0, bytes, 0);
}
}