本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.compactions.Compactor.CellSink方法的典型用法代码示例。如果您正苦于以下问题:Java Compactor.CellSink方法的具体用法?Java Compactor.CellSink怎么用?Java Compactor.CellSink使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.regionserver.compactions.Compactor
的用法示例。
在下文中一共展示了Compactor.CellSink方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Drains every cell produced by the scanner into the given sink, flushing
 * the memstore contents to the store-file writer.
 *
 * @param scanner source of cells to flush.
 * @param sink destination writer (typically a StoreFile.Writer).
 * @param smallestReadPoint smallest read point in effect for this flush.
 * @throws IOException if reading from the scanner or writing to the sink fails.
 */
protected void performFlush(InternalScanner scanner, Compactor.CellSink sink,
    long smallestReadPoint) throws IOException {
  // Cap how many cells we pull per scanner call so a huge memstore
  // cannot produce an unbounded in-memory batch.
  int batchLimit =
      conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
  ScannerContext scannerContext =
      ScannerContext.newBuilder().setBatchLimit(batchLimit).build();
  List<Cell> batch = new ArrayList<Cell>();
  boolean more = true;
  while (more) {
    more = scanner.next(batch, scannerContext);
    for (Cell cell : batch) {
      sink.append(cell);
    }
    // Reuse the same list across iterations to avoid per-batch allocation.
    batch.clear();
  }
}
示例2: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Flushes memstore contents by streaming every cell from the scanner into
 * the sink.
 *
 * @param scanner source of cells to flush.
 * @param sink destination writer (typically a StoreFile.Writer).
 * @param smallestReadPoint smallest read point in effect for this flush.
 * @throws IOException if reading from the scanner or writing to the sink fails.
 */
protected void performFlush(InternalScanner scanner,
    Compactor.CellSink sink, long smallestReadPoint) throws IOException {
  // Per-call batch size limit, so one scanner.next() cannot buffer an
  // arbitrarily large number of cells.
  int limit =
      conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
  List<Cell> buffer = new ArrayList<Cell>();
  while (true) {
    boolean hasMore = scanner.next(buffer, limit);
    for (Cell cell : buffer) {
      sink.append(cell);
    }
    buffer.clear();
    if (!hasMore) {
      break;
    }
  }
}
示例3: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Flushes memstore contents from the scanner into the sink, zeroing the
 * MVCC version of cells that are visible to every reader, and returns the
 * heap size of everything flushed.
 *
 * @param scanner source of cells to flush.
 * @param sink destination writer (typically a StoreFile.Writer).
 * @param smallestReadPoint smallest read point in effect; cells at or below
 *          it are safe to rewrite with memstoreTS 0 to save space on disk.
 * @return heap-size bytes accounted for the flushed cells.
 * @throws IOException if reading from the scanner or writing to the sink fails.
 */
protected long performFlush(InternalScanner scanner,
    Compactor.CellSink sink, long smallestReadPoint) throws IOException {
  // Bound the number of cells fetched per scanner call.
  int batchLimit =
      conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
  List<Cell> cells = new ArrayList<Cell>();
  long bytesFlushed = 0;
  boolean more = true;
  while (more) {
    more = scanner.next(cells, batchLimit);
    for (Cell cell : cells) {
      KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
      if (kv.getMvccVersion() <= smallestReadPoint) {
        // This KV is visible to all readers, so its memstoreTS can be
        // written as 0. Copy first: the original may still sit in the
        // memstore where concurrent scanners depend on its memstoreTS.
        kv = kv.shallowCopy();
        kv.setMvccVersion(0);
      }
      sink.append(kv);
      bytesFlushed += MemStore.heapSizeChange(kv, true);
    }
    cells.clear();
  }
  return bytesFlushed;
}
示例4: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Performs memstore flush, writing data from scanner into sink. Cells whose
 * MVCC version is at or below the smallest read point are rewritten with
 * memstoreTS 0 to save space on disk.
 * @param scanner Scanner to get data from.
 * @param sink Sink to write data to. Could be StoreFile.Writer.
 * @param smallestReadPoint Smallest read point used for the flush.
 * @return Bytes flushed (heap-size accounting, not on-disk bytes).
 */
protected long performFlush(InternalScanner scanner,
Compactor.CellSink sink, long smallestReadPoint) throws IOException {
// Upper bound on cells returned per scanner.next() call.
int compactionKVMax =
conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
List<Cell> kvs = new ArrayList<Cell>();
boolean hasMore;
long flushed = 0;
do {
hasMore = scanner.next(kvs, compactionKVMax);
if (!kvs.isEmpty()) {
for (Cell c : kvs) {
// If we know that this KV is going to be included always, then let us
// set its memstoreTS to 0. This will help us save space when writing to
// disk.
KeyValue kv = KeyValueUtil.ensureKeyValue(c);
if (kv.getMvccVersion() <= smallestReadPoint) {
// let us not change the original KV. It could be in the memstore
// changing its memstoreTS could affect other threads/scanners.
kv = kv.shallowCopy();
kv.setMvccVersion(0);
}
sink.append(kv);
flushed += MemStore.heapSizeChange(kv, true);
}
kvs.clear();
}
} while (hasMore);
return flushed;
}
示例5: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Performs memstore flush, writing data from scanner into sink. In addition
 * to the standard MVCC-zeroing, this variant tracks region split boundaries
 * and seals the sink's current block whenever a cell crosses one, so that
 * on-disk blocks align with split keys.
 * @param scanner Scanner to get data from.
 * @param sink Sink to write data to. Could be StoreFile.Writer.
 * @param smallestReadPoint Smallest read point used for the flush.
 */
protected void performFlush(InternalScanner scanner,
Compactor.CellSink sink, long smallestReadPoint) throws IOException {
// Upper bound on cells returned per scanner.next() call.
int compactionKVMax =
conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
List<Cell> kvs = new ArrayList<Cell>();
// Shen Li: start boundary tracking at the first split key of this region;
// nextSplitRow holds the boundary the next flushed cell is compared against.
splitKeyIndex = 0;
nextSplitRow = store.getRegionInfo().getSplitKey(splitKeyIndex);
boolean hasMore;
do {
hasMore = scanner.next(kvs, compactionKVMax);
if (!kvs.isEmpty()) {
for (Cell c : kvs) {
// If we know that this KV is going to be included always, then let us
// set its memstoreTS to 0. This will help us save space when writing to
// disk.
KeyValue kv = KeyValueUtil.ensureKeyValue(c);
if (kv.getMvccVersion() <= smallestReadPoint) {
// let us not change the original KV. It could be in the memstore
// changing its memstoreTS could affect other threads/scanners.
kv = kv.shallowCopy();
kv.setMvccVersion(0);
}
// Shen Li: TODO check split boundary. use Store, if exceed boundary,
// call Store to seal block and reset replica group
//
// NOTE(review): sink here is presumably a StoreFile.Writer whose
// HFile.Writer/FSDataOutputStream supports the seal and
// replica-group operations below — confirm against the sink type.
//
if (shouldSeal(kv)) {
// Seal BEFORE appending so the current cell lands in the new
// block; sealCurBlock flushes buffered data before sealing.
sink.sealCurBlock();
sink.setReplicaGroups(getReplicaNamespace(),
getReplicaGroups());
}
sink.append(kv);
}
kvs.clear();
}
} while (hasMore);
}
示例6: performFlush
import org.apache.hadoop.hbase.regionserver.compactions.Compactor; //导入方法依赖的package包/类
/**
 * Performs memstore flush, writing data from scanner into sink.
 *
 * <p>Pure pass-through: delegates to the superclass implementation
 * unchanged. Overridden here, presumably as an extension point or to widen
 * visibility — confirm against the enclosing class's intent.
 *
 * @param scanner Scanner to get data from.
 * @param sink Sink to write data to. Could be StoreFile.Writer.
 * @param smallestReadPoint Smallest read point used for the flush.
 */
@Override
protected void performFlush(InternalScanner scanner, Compactor.CellSink sink, long smallestReadPoint) throws IOException {
super.performFlush(scanner, sink, smallestReadPoint);
}