This page collects typical usage examples of the Java class org.apache.hadoop.hbase.index.client.Range. If you are asking how the Range class is used, what it is for, or where to find examples of it, the selected code samples below may help.
The Range class belongs to the org.apache.hadoop.hbase.index.client package. A total of 15 code examples of the Range class are shown below, ordered by popularity by default.
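Before diving into the examples, a short orientation may help: a Range describes a predicate on a single column (family and qualifier plus optional start/stop values, each paired with a CompareOp), and several Range objects can be collected into a RangeList that is attached to a Scan so the index-aware scanner can use them. The sketch below is assembled from the usage shown in Examples 8 and 14 later on this page; the Range constructor argument order, the column name f:price, the bound values, and the import paths for RangeList and IndexConstants are inferences or placeholders, not guaranteed by the source.

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.index.IndexConstants; // package path assumed
import org.apache.hadoop.hbase.index.client.Range;
import org.apache.hadoop.hbase.index.client.RangeList; // package path assumed
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;

public class RangeScanSketch {

  // Column "f:price" and the numeric bounds are hypothetical; the constructor order
  // (column, startValue, startType, stopValue, stopType) is inferred from Example 14.
  public ResultScanner scanWithIndex(HTable table) throws IOException {
    Range r = new Range(Bytes.toBytes("f:price"), Bytes.toBytes(100L),
        CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(500L), CompareOp.LESS);

    RangeList list = new RangeList();
    list.addRange(r); // collect every per-column predicate of the query

    Scan scan = new Scan();
    // hand the predicates to the index-aware scanner, as done in Example 8
    scan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Writables.getBytes(list));
    scan.setCacheBlocks(false);
    return table.getScanner(scan);
  }
}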
Example 1: printRange
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public static String printRange(Range r) {
  StringBuilder sb = new StringBuilder();
  sb.append("[" + Bytes.toString(r.getFamily()) + ":" + Bytes.toString(r.getQualifier())
      + "], values (");
  if (r.getStartValue() != null) {
    sb.append(LCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStartValue()));
    if (r.getStartType() == CompareOp.EQUAL || r.getStartType() == CompareOp.NOT_EQUAL) {
      sb.append(" <== ").append(r.getStartType()).append(" )");
      return sb.toString();
    }
  } else {
    sb.append("null");
  }
  sb.append(", ");
  if (r.getStopValue() != null) {
    sb.append(LCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStopValue()));
  } else {
    sb.append("MAX");
  }
  sb.append(")");
  return sb.toString();
}
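A brief usage note: for a hypothetical Range on family f, qualifier price, with start value 100 and stop value 500, the method above yields a string of the form shown in the comments below (the column name and values are illustrative only).

// With both bounds set, printRange(r) produces something like:
//   [f:price], values (100, 500)
// and with an EQUAL start type it short-circuits to:
//   [f:price], values (100 <== EQUAL )
System.out.println(printRange(r));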
Example 2: getIndexScanScale
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public float getIndexScanScale(Range r) {
  long rangeSize = 0, totalSize = 0;
  lock.readLock().lock();
  try {
    for (StoreFile sf : this.getStorefiles()) {
      // LCTODO: next, init index file reader after compaction!
      long[] tmp = sf.getIndexReader().getIndexFileReader().getRangeScale(r);
      rangeSize += tmp[0];
      totalSize += tmp[1];
    }
  } finally {
    lock.readLock().unlock();
  }
  if (totalSize == 0) {
    return 0.0f;
  } else {
    return (float) (((double) rangeSize) / ((double) totalSize));
  }
}
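Examples 8 and 11 later in this listing show how this scale is consumed: the client passes a maximum acceptable scale along with the scan, and a condition-tree leaf is pruned when the measured scale exceeds it. A minimal sketch of that caller-side decision, assuming the same call chain as Example 11 (the 0.3 threshold mirrors the MAX_SCAN_SCALE value set in Example 8):

// Hedged sketch: decide whether the index path is worthwhile for one Range,
// following the pruning test of Example 11; the threshold value is illustrative.
private boolean shouldUseIndex(HRegion region, Range range) {
  float maxScale = 0.3f; // same value passed as MAX_SCAN_SCALE in Example 8
  float scale = region.getStore(range.getFamily()).getIndexScanScale(range);
  return scale <= maxScale; // skip the index scan when the range covers too much of the store
}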
Example 3: mWinterToPrintRange
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

private String mWinterToPrintRange(Range r, byte[] cf, boolean isMain) {
  StringBuilder sb = new StringBuilder();
  byte[] targetQualifier = isMain ? null : r.getQualifier();
  sb.append("[" + Bytes.toString(cf) + ":" + Bytes.toString(targetQualifier) + "], values (");
  if (r.getStartValue() != null) {
    sb.append(LCCIndexConstant.getStringOfValue(lccIndexQualifierType, targetQualifier,
        r.getStartValue()));
    if (r.getStartType() == CompareOp.EQUAL || r.getStartType() == CompareOp.NOT_EQUAL) {
      sb.append(", ").append(r.getStartType()).append(")");
      return sb.toString();
    }
  } else {
    sb.append("null");
  }
  sb.append(", ");
  if (r.getStopValue() != null) {
    sb.append(LCCIndexConstant.getStringOfValue(lccIndexQualifierType, targetQualifier,
        r.getStopValue()));
  } else {
    sb.append("MAX");
  }
  sb.append(")");
  return sb.toString();
}
Example 4: mWinterShouldDiscardRow
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

private boolean mWinterShouldDiscardRow(Range r, byte[] row) {
  if (r.getStartValue() != null && r.getStartType() == CompareOp.GREATER) {
    if (mWinterStringStartsWith(row, r.getStartValue())) {
      // System.out.println("winter report on greater than start");
      return true;
    }
  }
  if (r.getStopValue() != null) {
    if (r.getStopType() == CompareOp.LESS) {
      if (mWinterStringStartsWith(row, r.getStopValue())) {
        // System.out.println("winter report on stop and less");
        return true;
      }
    }
  }
  return false;
}
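The helper mWinterStringStartsWith is not part of this listing; from the call sites above it appears to test whether the row key begins with the given value bytes. A hypothetical stand-in with that assumed semantics:

// Hypothetical implementation, matching how mWinterStringStartsWith is called above:
// returns true when 'row' starts with the bytes of 'prefix'.
private boolean mWinterStringStartsWith(byte[] row, byte[] prefix) {
  if (row == null || prefix == null || row.length < prefix.length) {
    return false;
  }
  for (int i = 0; i < prefix.length; ++i) {
    if (row[i] != prefix[i]) {
      return false;
    }
  }
  return true;
}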
Example 5: getIndexScanScale
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public float getIndexScanScale(Range r) {
  long rangeSize = 0, totalSize = 0;
  lock.readLock().lock();
  try {
    for (StoreFile sf : this.getStorefiles()) {
      long[] tmp = sf.getIndexReader().getIndexFileReader().getRangeScale(r);
      rangeSize += tmp[0];
      totalSize += tmp[1];
    }
  } finally {
    lock.readLock().unlock();
  }
  if (totalSize == 0) {
    return 0.0f;
  } else {
    return (float) (((double) rangeSize) / ((double) totalSize));
  }
}
Example 6: printRange
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public static String printRange(Range r) {
  StringBuilder sb = new StringBuilder();
  sb.append("[" + Bytes.toString(r.getFamily()) + ":" + Bytes.toString(r.getQualifier())
      + "], values (");
  if (r.getStartValue() != null) {
    sb.append(LCCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStartValue()));
    if (r.getStartType() == CompareOp.EQUAL || r.getStartType() == CompareOp.NOT_EQUAL) {
      sb.append(" <== ").append(r.getStartType()).append(" )");
      return sb.toString();
    }
  } else {
    sb.append("null");
  }
  sb.append(", ");
  if (r.getStopValue() != null) {
    sb.append(LCCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStopValue()));
  } else {
    sb.append("MAX");
  }
  sb.append(")");
  return sb.toString();
}
Example 7: getScanner
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

@Override
public ResultScanner getScanner() throws IOException {
  Scan scan = new Scan();
  FilterList filters = new FilterList();
  for (Range range : ranges) {
    if (range.getStartValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(range.getFamily(), range.getQualifier(),
          range.getStartType(), range.getStartValue()));
    }
    if (range.getStopValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(range.getFamily(), range.getQualifier(),
          range.getStopType(), range.getStopValue()));
    }
    System.out.println("coffey hbase main index range: " + Bytes.toString(range.getColumn())
        + " ["
        + LCCIndexConstant.getStringOfValueAndType(range.getDataType(), range.getStartValue())
        + ","
        + LCCIndexConstant.getStringOfValueAndType(range.getDataType(), range.getStopValue())
        + "]");
  }
  scan.setCacheBlocks(false);
  scan.setFilter(filters);
  return table.getScanner(scan);
}
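For completeness, the ResultScanner returned above would normally be drained and closed by the caller; a minimal hedged usage sketch (the row-key printing is illustrative only):

// Consume and close the scanner produced by getScanner().
ResultScanner scanner = getScanner();
try {
  for (Result result : scanner) {
    System.out.println(Bytes.toString(result.getRow())); // illustrative handling of each row
  }
} finally {
  scanner.close();
}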
Example 8: getScanner
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

@Override
public ResultScanner getScanner() throws IOException {
  Scan scan = new Scan();
  RangeList list = new RangeList();
  FilterList filters = new FilterList();
  for (Range r : ranges) {
    list.addRange(r);
    if (r.getStartValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(Bytes.toBytes(TPCHConstants.FAMILY_NAME),
          r.getQualifier(), r.getStartType(), r.getStartValue()));
    }
    if (r.getStopValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(Bytes.toBytes(TPCHConstants.FAMILY_NAME),
          r.getQualifier(), r.getStopType(), r.getStopValue()));
    }
  }
  scan.setFilter(filters);
  scan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Writables.getBytes(list));
  scan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(0.3));
  scan.setCacheBlocks(false);
  return table.getScanner(scan);
}
Example 9: work
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public void work(String confPath, String assignedFile, String testClass, String rangeFilterFile,
    int cacheSize) throws IOException, InterruptedException {
  double totalTime = 0, temp;
  ArrayList<Double> timeList = new ArrayList<Double>();
  List<Range> rangeList = getRangesFromFile(rangeFilterFile);
  for (Range r : rangeList) {
    System.out.println("coffey get range: " + TPCHConstants.printRange(r));
  }
  for (int i = 0; i < ROUND; ++i) {
    temp = runOneTime(confPath, assignedFile, testClass, rangeList, cacheSize);
    totalTime += temp;
    timeList.add(temp);
    if (ROUND > 1) {
      Thread.sleep(TPCHConstants.ROUND_SLEEP_TIME);
    }
  }
  System.out.println("coffey report scan, run " + testClass + ", have run " + ROUND
      + " times, avg: " + totalTime / ROUND);
  System.out.println("coffey reporting scan each time: ");
  for (int i = 0; i < timeList.size(); ++i) {
    System.out.println("coffey report scan round " + i + ": " + timeList.get(i));
  }
}
Example 10: getIndexScanScale
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public float getIndexScanScale(Range r) {
  long rangeSize = 0, totalSize = 0;
  lock.readLock().lock();
  try {
    for (StoreFile sf : this.getStorefiles()) {
      long[] tmp = sf.getIndexReader().getIndexFileReader().getRangeScale(r);
      rangeSize += tmp[0];
      totalSize += tmp[1];
    }
  } finally {
    lock.readLock().unlock();
  }
  if (totalSize == 0) {
    return 0.0f;
  } else {
    return (float) (((double) rangeSize) / ((double) totalSize));
  }
}
Example 11: ConditionTreeLeafNode
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public ConditionTreeLeafNode(HRegion region, Range range, float maxScale) {
  this.range = range;
  this.scale = region.getStore(range.getFamily()).getIndexScanScale(range);
  // prune large scale node
  if (this.scale > maxScale) {
    Log.info("LCINFO: A prune=true because (this.scale) " + this.scale + " > " + maxScale
        + "(max scale)");
    this.prune = true;
  }
}
Example 12: getStoreIndexScanner
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

@Override
public StoreIndexScanner getStoreIndexScanner(Range r, Scan s, Set<ByteArray> joinSet,
    boolean isAND) throws IOException {
  lock.readLock().lock();
  try {
    StoreIndexScanner sis =
        new StoreIndexScanner(this, this.memstore.getScanners(region.getMVCC().getReadPoint()),
            this.comparator, this.irIndexComparator, r, s, joinSet, isAND);
    this.addChangedReaderObserver(sis);
    return sis;
  } finally {
    lock.readLock().unlock();
  }
}
Example 13: ConditionTreeLeafNode
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

public ConditionTreeLeafNode(HRegion region, Range range, float maxScale) {
  this.range = range;
  this.scale = region.getStore(range.getFamily()).getIndexScanScale(range);
  // prune large scale node
  if (this.scale > maxScale) {
    System.out.println("winter scale too big");
    this.prune = true;
  }
}
Example 14: mWinterUpdatePriLCCIndex
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

private Range mWinterUpdatePriLCCIndex(ArrayList<Range> rangeList,
    Set<Entry<byte[], NavigableSet<byte[]>>> sets) throws IOException {
  assert rangeList != null && rangeList.size() > 0;
  // Range target = mWinterCalProperOneAsMainIndex(rangeList);
  Range target = selectTheBestRange(rangeList, sets);
  if (target == null) {
    throw new IOException("winter no main target range is found!");
  } else {
    // System.out.println("winter select the best target range: "
    // + Bytes.toString(target.getQualifier()));
  }
  rangeList.remove(target);
  lccMainQualifier = target.getQualifier();
  byte[] startValue = target.getStartValue();
  byte[] stopValue = target.getStopValue();
  if (startValue != null) {
    startValue =
        Bytes.add(mWinterCalRangeKey(target.getQualifier(), startValue),
            LCCIndexConstant.DELIMITER_BYTES);
  }
  if (stopValue != null) {
    stopValue =
        Bytes.add(mWinterCalRangeKey(target.getQualifier(), stopValue),
            LCCIndexConstant.DELIMITER_PLUS_ONE_BYTES);
  } else if (target.getStartType() == CompareOp.EQUAL) {
    stopValue =
        Bytes.add(mWinterCalRangeKey(target.getQualifier(), target.getStartValue()),
            LCCIndexConstant.DELIMITER_PLUS_ONE_BYTES);
  }
  return new Range(
      LCCIndexGenerator.mWinterGenerateLCCIndexFamily_Bytes(target.getQualifier()), startValue,
      target.getStartType(), stopValue, target.getStopType());
}
Example 15: mWinterPrintRanges
import org.apache.hadoop.hbase.index.client.Range; // import the required package/class

private void mWinterPrintRanges(Range mainRange, ArrayList<Range> lccRangeFilters) {
  String temp;
  temp = mWinterToPrintRange(mainRange, mainRange.getFamily(), true);
  System.out.println("winter filter main range: " + temp);
  for (Range r : lccRangeFilters) {
    temp = mWinterToPrintRange(r, mainRange.getFamily(), false);
    assert temp != null && temp.length() > 0;
    System.out.println("winter filter range: " + temp);
  }
}