This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.Store. If you are unsure what the Store class does or how to use it, the curated examples below may help.
The Store class belongs to the org.apache.hadoop.hbase.regionserver package. Fifteen code examples are shown below, ordered by popularity.
Example 1: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
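The hooks in these coprocessor examples read from ttls, versions, and TEST_UTIL fields whose declarations are not shown. A plausible reconstruction of those fields, assuming per-table override maps keyed by TableName (the concrete declarations are an assumption, not part of the original source):
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
// Assumed per-table TTL and max-versions overrides consulted by the hooks.
private static final Map<TableName, Long> ttls = new HashMap<TableName, Long>();
private static final Map<TableName, Integer> versions = new HashMap<TableName, Integer>();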
Example 2: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
    long earliestPutTs, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, scanners, scanType,
      store.getSmallestReadPoint(), earliestPutTs);
}
Example 3: preStoreScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
        family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
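Examples 1-3 are hooks of a RegionObserver coprocessor. For context, a minimal sketch of how such an observer could be attached to a test table; the class name ScanObserver is a placeholder, and tableName/family are the test fields used elsewhere in these examples:
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(family));
desc.addCoprocessor(ScanObserver.class.getName()); // placeholder observer class
TEST_UTIL.getHBaseAdmin().createTable(desc);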
Example 4: testPurgeExpiredFiles
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Test
public void testPurgeExpiredFiles() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    // Wait for the major compaction to collapse the store files.
    while (store.getStorefilesCount() > 1) {
      Thread.sleep(100);
    }
    assertEquals(1, store.getStorefilesCount());
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 5: prepareData
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTable table = TEST_UTIL.createTable(tableName, family);
  Random rand = new Random();
  // Write 100 rows of 128 KB values, flushing after every 10 rows
  // to produce 10 store files.
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).add(family, qualifier, value));
    }
    admin.flush(tableName);
  }
  return getStoreWithName(tableName);
}
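prepareData() ends by calling a getStoreWithName(...) helper that is not shown above. A minimal sketch of how such a helper is typically written in HBase test code, assuming a running mini cluster and the HBase 1.x Region/Store interfaces:
private Store getStoreWithName(TableName tableName) {
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
    HRegionServer rs = rst.getRegionServer();
    for (Region region : rs.getOnlineRegions(tableName)) {
      // Single-family test table, so the region's only store is the one we want.
      return region.getStores().iterator().next();
    }
  }
  return null;
}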
Example 6: testCompactionWithoutThroughputLimit
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      NoLimitCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
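Example 6 disables throttling via NoLimitCompactionThroughputController so the elapsed time serves as a baseline. The companion measurement with a limit would swap in the pressure-aware controller; a hedged sketch, where the bound keys follow HBase 1.x naming and should be verified against your version:
conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
// Throttle compactions to roughly 10-20 MB/s (key names assumed from HBase 1.x).
conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 20L * 1024 * 1024);
conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 10L * 1024 * 1024);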
Example 7: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
    CompactionRequest request) throws IOException {
  // Get the latest tx snapshot state for the compaction
  TransactionVisibilityState snapshot = cache.getLatestState();
  // Record tx state before the compaction
  if (compactionState != null) {
    compactionState.record(request, snapshot);
  }
  // Also make sure to use the same snapshot for the compaction
  return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
Example 8: createStoreScanner
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
    TransactionVisibilityState snapshot, Store store,
    List<? extends KeyValueScanner> scanners, ScanType type,
    long earliestPutTs) throws IOException {
  if (snapshot == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Region " + env.getRegion().getRegionInfo().getRegionNameAsString() +
          ", no current transaction state found, defaulting to normal " + action + " scanner");
    }
    return null;
  }
  // construct a dummy transaction from the latest snapshot
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  Scan scan = new Scan();
  // need to see all versions, since we filter out excludes and applications may rely on multiple versions
  scan.setMaxVersions();
  scan.setFilter(
      new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
          snapshot.getInvalid(),
          getTransactionFilter(dummyTx, type, null)));
  return new StoreScanner(store, store.getScanInfo(), scan, scanners,
      type, store.getSmallestReadPoint(), earliestPutTs);
}
Example 9: createStoreScanner
Identical to Example 8 except that the region name is obtained via env.getRegion().getRegionNameAsString() rather than through getRegionInfo().
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
    TransactionVisibilityState snapshot, Store store,
    List<? extends KeyValueScanner> scanners, ScanType type,
    long earliestPutTs) throws IOException {
  if (snapshot == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Region " + env.getRegion().getRegionNameAsString() +
          ", no current transaction state found, defaulting to normal " + action + " scanner");
    }
    return null;
  }
  // construct a dummy transaction from the latest snapshot
  Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
  Scan scan = new Scan();
  // need to see all versions, since we filter out excludes and applications may rely on multiple versions
  scan.setMaxVersions();
  scan.setFilter(
      new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
          snapshot.getInvalid(),
          getTransactionFilter(dummyTx, type, null)));
  return new StoreScanner(store, store.getScanInfo(), scan, scanners,
      type, store.getSmallestReadPoint(), earliestPutTs);
}
Example 10: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
Example 11: preStoreScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
Example 12: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  Store.ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.MINOR_COMPACT, store.getHRegion().getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
Example 13: preCompact
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
    final Store store, final InternalScanner scanner, final ScanType scanType)
    throws IOException {
  requirePermission("compact", getTableName(e.getEnvironment()), null, null, Action.ADMIN,
      Action.CREATE);
  return scanner;
}
Example 14: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
    List<? extends KeyValueScanner> scanners, final ScanType scanType, final long earliestPutTs,
    final InternalScanner s) throws IOException {
  return s;
}
Example 15: addRegion
import org.apache.hadoop.hbase.regionserver.Store; // import the required package/class
/**
 * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
 * This is used by the "online snapshot" when the table is enabled.
 */
public void addRegion(final HRegion region) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor
  RegionVisitor visitor = createRegionVisitor(desc);

  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing '" + region + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(region.getRegionInfo());
  monitor.rethrowException();

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");
  for (Store store : region.getStores()) {
    // 2.1. build the snapshot reference for the store
    Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
    monitor.rethrowException();

    List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
    }

    // 2.2. iterate through all the store's files and create "references".
    for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
      StoreFile storeFile = storeFiles.get(i);
      monitor.rethrowException();

      // create "reference" to this store file.
      LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
      visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
    }
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
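addRegion comes from HBase's SnapshotManifest. A hedged usage sketch of how an online snapshot might drive it; conf, fs, workingDir, snapshotDesc, monitor, and onlineRegions are placeholders for values the snapshot procedure already holds, and the SnapshotManifest.create signature follows HBase 1.x:
SnapshotManifest manifest = SnapshotManifest.create(conf, fs, workingDir, snapshotDesc, monitor);
for (HRegion region : onlineRegions) { // regions of the table being snapshotted
  manifest.addRegion(region);
}
manifest.consolidate(); // writes the combined manifest to the snapshot directory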