This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HColumnDescriptor.getMaxVersions. If you are wondering what HColumnDescriptor.getMaxVersions does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the enclosing class, org.apache.hadoop.hbase.HColumnDescriptor, for more context.
Below are 5 code examples of HColumnDescriptor.getMaxVersions, ordered roughly by popularity.
Example 1: preFlushScannerOpen
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the dependent package/class

@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  // Per-table overrides win; otherwise fall back to the column family's
  // getMaxVersions() and the existing ScanInfo's TTL.
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
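The ttls and versions lookups above refer to fields of the test observer that this excerpt does not show. As a minimal sketch (the field names mirror the usage above; everything else is assumed), they could be declared like this:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.TableName;

// Hypothetical scaffolding for the observer above: per-table overrides that
// the hooks consult before falling back to family.getMaxVersions() and the
// existing ScanInfo's TTL.
class ScanPolicyOverrides {
  final Map<TableName, Long> ttls = new ConcurrentHashMap<TableName, Long>();
  final Map<TableName, Integer> versions = new ConcurrentHashMap<TableName, Integer>();
}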
Example 2: preCompactScannerOpen
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the dependent package/class

@Override
public InternalScanner preCompactScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
    long earliestPutTs, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  // Same fallback logic as preFlushScannerOpen, but the scan type and the
  // earliest put timestamp come from the compaction request itself.
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, scanners, scanType,
      store.getSmallestReadPoint(), earliestPutTs);
}
Example 3: preStoreScannerOpen
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the dependent package/class

@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    // User tables get the override-aware ScanInfo; system tables keep the
    // default scanner returned below.
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
        family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
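These three hooks only take effect once the observer is registered with the region. One common way, sketched below under the assumption that the observer class is com.example.ScanPolicyObserver (that name and createTableWithPolicy are illustrative), is to attach it to the table descriptor at creation time:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

class RegisterObserverSketch {
  static void createTableWithPolicy(Admin admin) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("my_table"));
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setMaxVersions(3); // the value family.getMaxVersions() will later report
    desc.addFamily(family);
    desc.addCoprocessor("com.example.ScanPolicyObserver"); // hypothetical observer class
    admin.createTable(desc);
  }
}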
Example 4: colDescFromHbase
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the dependent package/class

/**
 * This utility method creates a new Thrift ColumnDescriptor "struct" based on
 * an HBase HColumnDescriptor object.
 *
 * @param in HBase HColumnDescriptor object
 * @return Thrift ColumnDescriptor
 */
public static ColumnDescriptor colDescFromHbase(HColumnDescriptor in) {
  ColumnDescriptor col = new ColumnDescriptor();
  col.name = ByteBuffer.wrap(Bytes.add(in.getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY));
  col.maxVersions = in.getMaxVersions();
  col.compression = in.getCompression().toString();
  col.inMemory = in.isInMemory();
  col.blockCacheEnabled = in.isBlockCacheEnabled();
  col.bloomFilterType = in.getBloomFilterType().toString();
  return col;
}
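A short usage sketch: the conversion copies getMaxVersions() straight into the Thrift struct's maxVersions field. This assumes the method lives in HBase's ThriftUtilities class and that the generated Thrift types are on the classpath:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.thrift.ThriftUtilities;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;

class ColDescSketch {
  static void roundTrip() {
    HColumnDescriptor in = new HColumnDescriptor("cf");
    in.setMaxVersions(4);
    ColumnDescriptor out = ThriftUtilities.colDescFromHbase(in);
    assert out.maxVersions == 4; // getMaxVersions() carried over verbatim
  }
}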
Example 5: ScanInfo
import org.apache.hadoop.hbase.HColumnDescriptor; // import of the dependent package/class

/**
 * @param conf the store's configuration
 * @param family {@link HColumnDescriptor} describing the column family
 * @param ttl Store's TTL (in ms)
 * @param timeToPurgeDeletes duration in ms after which a delete marker can
 *          be purged during a major compaction.
 * @param comparator The store's comparator
 */
public ScanInfo(final Configuration conf, final HColumnDescriptor family, final long ttl,
    final long timeToPurgeDeletes, final KVComparator comparator) {
  this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl,
      family.getKeepDeletedCells(), timeToPurgeDeletes, comparator);
}
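A hedged usage sketch of this convenience constructor: it forwards the family-level settings, including getMaxVersions(), to the full ScanInfo constructor, so the caller only supplies the store-level values. ScanInfo is a region-server internal class whose signature has changed across HBase releases, so this matches the constructor shown above rather than any particular current API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.ScanInfo;

class ScanInfoSketch {
  static ScanInfo defaults() {
    Configuration conf = HBaseConfiguration.create();
    HColumnDescriptor family = new HColumnDescriptor("cf"); // max versions taken from the family
    long ttl = HConstants.FOREVER;   // store TTL in ms; FOREVER means no expiry
    long timeToPurgeDeletes = 0L;    // delete markers purgeable at the next major compaction
    return new ScanInfo(conf, family, ttl, timeToPurgeDeletes, KeyValue.COMPARATOR);
  }
}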