This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setCacheBlocks. If you are wondering what Scan.setCacheBlocks does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about its enclosing class, org.apache.hadoop.hbase.client.Scan.
Below are 15 code examples of Scan.setCacheBlocks, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
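Before the numbered examples, here is a minimal, self-contained sketch of the common pattern (not taken from the examples below; the countRows helper, the Connection, and the table name are illustrative assumptions): for a one-off sequential read such as a full-table scan, disabling block caching keeps the scan from evicting hot blocks from the RegionServer block cache.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper: count rows with a full-table scan that bypasses the block cache.
static long countRows(Connection connection, TableName tableName) throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false); // one-off sequential read: caching these blocks would only evict hot data
  scan.setCaching(500);       // fetch up to 500 rows per RPC to cut down on round trips
  long count = 0;
  try (Table table = connection.getTable(tableName);
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count++;
    }
  }
  return count;
}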
Example 1: copyTable
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Copy a table: scan one column of the source table and write it into a newly created table.
 *
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String ColumnFamily, String ColumnName) throws IOException {
  if (CreateNewTable(newTableName)) {
    logger.info("Created table " + newTableName + " successfully");
  } else {
    logger.info("Failed to create table " + newTableName);
  }
  Scan s = new Scan();
  s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
  s.setMaxVersions(1);
  s.setCacheBlocks(false); // bulk copy: avoid polluting the block cache
  ResultScanner rs = hbase_table.getScanner(s); // hbase_table: pre-opened handle to the source table (class field)
  HTableInterface hbase_table_new = conn.getTable(newTableName);
  for (Result r : rs) {
    byte[] key = r.getRow();
    byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
    Put put = new Put(key);
    put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName), value);
    hbase_table_new.put(put);
  }
  rs.close();
  hbase_table_new.close();
}
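A hypothetical invocation of the helper above; the table and column names are made up purely for illustration:

// Copy the "payload" column of family "cf" from "events" into a freshly created "events_backup" table (names are illustrative).
copyTable("events", "events_backup", "cf", "payload");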
Example 2: getConfiguredScanForJob
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Set Scan Versions
  s.setMaxVersions(Integer.MAX_VALUE);
  s.setCacheBlocks(false);
  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter rowFilter = getRowFilter(args);
  if (rowFilter != null) {
    LOG.info("Setting Row Filter for counter.");
    s.setFilter(rowFilter);
  }
  // Set TimeRange if defined
  long[] timeRange = getTimeRange(args);
  if (timeRange != null) {
    LOG.info("Setting TimeRange for counter.");
    s.setTimeRange(timeRange[0], timeRange[1]);
  }
  return s;
}
Example 3: initScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if (families != null) {
    for (String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
Example 4: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();
  Scan newScan = new Scan(rawScan);
  byte[] key = regionLocation.getRegionInfo().getStartKey();
  if (key != null && key.length > 0) newScan.setStartRow(key);
  key = regionLocation.getRegionInfo().getEndKey();
  if (key != null && key.length > 0) newScan.setStopRow(key);
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
Example 5: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setFilter(rangeList.toFilterList());
  newScan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(1.0f));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
Example 6: SingleScanner
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
public SingleScanner(Scan scan, byte[][] resultColumns, Table table) throws IOException {
  this.scan = scan;
  this.resultColumns = resultColumns;
  this.indexTable = table;
  this.currentStartKey = scan.getStartRow();
  this.scanning = true;
  this.finished = false;
  scan.setCacheBlocks(false);
  this.resultScanner = table.getScanner(scan);
  LOG.debug("scan caching:" + scan.getCaching());
}
Example 7: getScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
protected Scan getScan() {
  Scan scan = new Scan();
  scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, scanRangeList.toBytesAttribute());
  scan.setFilter(scanRangeList.toFilterList());
  scan.setCacheBlocks(false);
  return scan;
}
Example 8: executeScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private OpResult executeScan() throws IOException, ParseException {
  if (!hasScan()) {
    return new OpResult("scan not supported", 1, 1);
  }
  Table table = conn.getTable(opTblName);
  BufferedReader br = new BufferedReader(new FileReader(scanFilePath));
  String line;
  long totalTime = 0;
  int counter = 0;
  Result[] results;
  while ((line = br.readLine()) != null) {
    Scan scan = new Scan(getIndexTableScanStartKey(line));
    scan.setCaching(workload.getScanCacheSize());
    scan.setCacheBlocks(false);
    long startTime = System.currentTimeMillis();
    ResultScanner scanner = table.getScanner(scan);
    int wantedRecords = sizeScanCovering;
    while (true) {
      results = scanner.next(Math.min(wantedRecords, workload.getScanCacheSize()));
      if (results == null || results.length == 0) break;
      for (Result result : results) {
        int k = recordsInOneResult(result);
        wantedRecords -= k;
        counter += k;
      }
      if (wantedRecords <= 0) break;
    }
    scanner.close();
    totalTime += System.currentTimeMillis() - startTime;
  }
  OpResult ret = new OpResult("scan", counter, totalTime);
  br.close();
  table.close();
  return ret;
}
Example 9: createGCScanner
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private GCScanner createGCScanner(ScanRange selectedRange) throws IOException {
  List<ScanRange> list = new ArrayList<>(rangeList.getRanges());
  list.remove(selectedRange);
  Scan scan = new Scan();
  scan.setStartRow(selectedRange.getStart());
  scan.setStopRow(selectedRange.getStop());
  scan.setCaching(rawScan.getCaching());
  scan.setCacheBlocks(rawScan.getCacheBlocks());
  scan.setFilter(new ScanRange.ScanRangeList(list).toFilterList());
  Table table = conn.getTable(
      relation.getIndexTableName(selectedRange.getFamily(), selectedRange.getQualifier()));
  ResultScanner scanner = table.getScanner(scan);
  return new GCScanner(this, scanner, selectedRange.getFamily(), selectedRange.getQualifier());
}
Example 10: processGet
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Override protected Result processGet(Table table, Get get) throws IOException {
  Scan scan = new Scan();
  scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  scan.setReversed(true);
  scan.setStartRow(get.getRow());
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  ResultScanner scanner = table.getScanner(scan);
  Result ret = scanner.next();
  scanner.close();
  return ret;
}
Example 11: createScanOnMemstore
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Create a scan on the memstore.
 */
private Scan createScanOnMemstore(Scan rawScan, ScanRange.ScanRangeList allRangeList)
    throws IOException {
  Scan scan = new Scan(rawScan);
  scan.setCacheBlocks(false);
  scan.setFilter(allRangeList.toFilterList());
  return scan;
}
Example 12: fullScanAndCount
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private int fullScanAndCount(final TableName tableName, final ScanInjector injector)
    throws Exception {
  Table table = UTIL.getConnection().getTable(tableName);
  int count = 0;
  try {
    Scan scan = new Scan();
    scan.setCaching(1);
    scan.setCacheBlocks(false);
    injector.beforeScan(table, scan);
    ResultScanner scanner = table.getScanner(scan);
    try {
      while (true) {
        injector.beforeScanNext(table);
        Result result = scanner.next();
        injector.afterScanNext(table, result);
        if (result == null) break;
        if ((count++ % (ROW_PER_FILE / 2)) == 0) {
          LOG.debug("scan next " + count);
        }
      }
    } finally {
      scanner.close();
      injector.afterScan(table);
    }
  } finally {
    table.close();
  }
  return count;
}
Example 13: getScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
private Scan getScan() {
  Scan scan = new Scan(); // default scan settings
  scan.setCacheBlocks(false);
  scan.setMaxVersions(1);
  scan.setScanMetricsEnabled(true);
  if (caching != null) {
    scan.setCaching(Integer.parseInt(caching));
  }
  return scan;
}
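Example 13 turns scan metrics on with setScanMetricsEnabled(true). As a rough sketch of how those metrics can be read back afterwards, assuming an HBase 2.x client where ResultScanner exposes getScanMetrics() (the table handle and the println logging below are illustrative, not part of the example above):

// Sketch only: iterate the scanner, then dump the accumulated scan metrics (rows scanned, RPC calls, ...).
try (ResultScanner scanner = table.getScanner(getScan())) {
  for (Result r : scanner) {
    // process each row
  }
  org.apache.hadoop.hbase.client.metrics.ScanMetrics metrics = scanner.getScanMetrics();
  if (metrics != null) {
    metrics.getMetricsMap().forEach((name, value) -> System.out.println(name + " = " + value));
  }
}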
Example 14: testReverseScanWithPadding
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test
public void testReverseScanWithPadding() throws Exception {
  byte[] terminator = new byte[] { -1 };
  byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
  byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
  byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<Cell>();
  int count = 1;
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(3, count);
}
Example 15: runCheck
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * After adding data to the table, start a MapReduce job to check the linked-list structure of the loaded data.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
  LOG.info("Running check");
  Configuration conf = getConf();
  String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
  Path p = util.getDataTestDirOnTestFS(jobName);
  Job job = new Job(conf);
  job.setJarByClass(getClass());
  job.setJobName(jobName);
  job.setPartitionerClass(NaturalKeyPartitioner.class);
  job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
  job.setSortComparatorClass(CompositeKeyComparator.class);
  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
    scan.setConsistency(Consistency.TIMELINE);
  }
  TableMapReduceUtil.initTableMapperJob(
      getTablename().getName(),
      scan,
      LinkedListCheckingMapper.class,
      LinkKey.class,
      LinkChain.class,
      job
  );
  job.setReducerClass(LinkedListCheckingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  FileOutputFormat.setOutputPath(job, p);
  assertEquals(true, job.waitForCompletion(true));
  // Delete the files.
  util.getTestFileSystem().delete(p, true);
}