This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setCacheBlocks. If you are wondering what Scan.setCacheBlocks does, how to use it, or where to find examples of it, the curated method code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.client.Scan.
The sections below show 15 code examples of Scan.setCacheBlocks, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Java code examples.
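Before the examples, here is a minimal, self-contained sketch of how setCacheBlocks is typically combined with a scanner. The table name "example_table" and the connection setup are illustrative assumptions, not taken from the examples below. Block caching is usually disabled for one-off full scans (MapReduce jobs, table copies, verification passes) so that the scan does not evict hot data from the RegionServer block cache.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SetCacheBlocksExample {
    public static void main(String[] args) throws IOException {
        // "example_table" is a placeholder; substitute an existing table.
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("example_table"))) {
            Scan scan = new Scan();
            // Disable block caching so this one-off full scan does not evict
            // hot data from the RegionServer block cache.
            scan.setCacheBlocks(false);
            scan.setCaching(500); // rows fetched per RPC
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println(Bytes.toString(result.getRow()));
                }
            }
        }
    }
}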
Example 1: copyTable
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * Copy the given column of a table into a new table.
 *
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String columnFamily, String columnName) throws IOException {
    if (CreateNewTable(newTableName)) {
        logger.info("Created table " + newTableName + " successfully");
    } else {
        logger.info("Failed to create table " + newTableName);
    }
    Scan s = new Scan();
    s.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
    s.setMaxVersions(1);
    s.setCacheBlocks(false);
    ResultScanner rs = hbase_table.getScanner(s);
    HTableInterface hbase_table_new = conn.getTable(newTableName);
    for (Result r : rs) {
        byte[] key = r.getRow();
        byte[] value = r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
        Put put = new Put(key);
        put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName), value);
        hbase_table_new.put(put);
    }
    rs.close();
    hbase_table_new.close();
}
Example 2: getConfiguredScanForJob
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
    Scan s = new Scan();
    // Set Scan Versions
    s.setMaxVersions(Integer.MAX_VALUE);
    s.setCacheBlocks(false);
    // Set Scan Column Family
    if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
        s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
    }
    // Set RowFilter or Prefix Filter if applicable.
    Filter rowFilter = getRowFilter(args);
    if (rowFilter != null) {
        LOG.info("Setting Row Filter for counter.");
        s.setFilter(rowFilter);
    }
    // Set TimeRange if defined
    long[] timeRange = getTimeRange(args);
    if (timeRange != null) {
        LOG.info("Setting TimeRange for counter.");
        s.setTimeRange(timeRange[0], timeRange[1]);
    }
    return s;
}
Example 3: initScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
Scan initScan() throws IOException {
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    if (startTime != 0 || endTime != 0) {
        scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
    }
    if (scanBatch > 0) {
        scan.setBatch(scanBatch);
    }
    if (versions >= 0) {
        scan.setMaxVersions(versions);
    }
    if (!isTableStartRow(startRow)) {
        scan.setStartRow(startRow);
    }
    if (!isTableEndRow(stopRow)) {
        scan.setStopRow(stopRow);
    }
    if (families != null) {
        for (String fam : families.split(",")) {
            scan.addFamily(Bytes.toBytes(fam));
        }
    }
    return scan;
}
Example 4: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
    if (INIT_REGION_SIZE != getRegionNumber()) {
        throw new IOException(
            "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
    }
    if (regionLocationQueue.isEmpty()) return null;
    HRegionLocation regionLocation = regionLocationQueue.poll();
    Scan newScan = new Scan(rawScan);
    byte[] key = regionLocation.getRegionInfo().getStartKey();
    if (key != null && key.length > 0) newScan.setStartRow(key);
    key = regionLocation.getRegionInfo().getEndKey();
    if (key != null && key.length > 0) newScan.setStopRow(key);
    newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
    newScan.setId(rawScan.getId());
    newScan.setCacheBlocks(rawScan.getCacheBlocks());
    newScan.setCaching(rawScan.getCaching());
    return table.getScanner(newScan);
}
Example 5: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
    if (INIT_REGION_SIZE != getRegionNumber()) {
        throw new IOException(
            "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
    }
    if (regionLocationQueue.isEmpty()) return null;
    HRegionLocation regionLocation = regionLocationQueue.poll();
    Scan newScan = new Scan(rawScan);
    if (regionLocation.getRegionInfo().getStartKey() != null)
        newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
    if (regionLocation.getRegionInfo().getEndKey() != null)
        newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
    newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
    newScan.setFilter(rangeList.toFilterList());
    newScan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(1.0f));
    newScan.setId(rawScan.getId());
    newScan.setCacheBlocks(rawScan.getCacheBlocks());
    newScan.setCaching(rawScan.getCaching());
    return table.getScanner(newScan);
}
Example 6: SingleScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
public SingleScanner(Scan scan, byte[][] resultColumns, Table table) throws IOException {
    this.scan = scan;
    this.resultColumns = resultColumns;
    this.indexTable = table;
    this.currentStartKey = scan.getStartRow();
    this.scanning = true;
    this.finished = false;
    scan.setCacheBlocks(false);
    this.resultScanner = table.getScanner(scan);
    LOG.debug("scan caching:" + scan.getCaching());
}
Example 7: getScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
protected Scan getScan() {
    Scan scan = new Scan();
    scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, scanRangeList.toBytesAttribute());
    scan.setFilter(scanRangeList.toFilterList());
    scan.setCacheBlocks(false);
    return scan;
}
Example 8: executeScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private OpResult executeScan() throws IOException, ParseException {
    if (!hasScan()) {
        return new OpResult("scan not supported", 1, 1);
    }
    Table table = conn.getTable(opTblName);
    BufferedReader br = new BufferedReader(new FileReader(scanFilePath));
    String line;
    long totalTime = 0;
    int counter = 0;
    Result[] results;
    while ((line = br.readLine()) != null) {
        Scan scan = new Scan(getIndexTableScanStartKey(line));
        scan.setCaching(workload.getScanCacheSize());
        scan.setCacheBlocks(false);
        long startTime = System.currentTimeMillis();
        ResultScanner scanner = table.getScanner(scan);
        int wantedRecords = sizeScanCovering;
        while (true) {
            results = scanner.next(Math.min(wantedRecords, workload.getScanCacheSize()));
            if (results == null || results.length == 0) break;
            for (Result result : results) {
                int k = recordsInOneResult(result);
                wantedRecords -= k;
                counter += k;
            }
            if (wantedRecords <= 0) break;
        }
        scanner.close();
        totalTime += System.currentTimeMillis() - startTime;
    }
    OpResult ret = new OpResult("scan", counter, totalTime);
    br.close();
    table.close();
    return ret;
}
Example 9: createGCScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private GCScanner createGCScanner(ScanRange selectedRange) throws IOException {
    List<ScanRange> list = new ArrayList<>(rangeList.getRanges());
    list.remove(selectedRange);
    Scan scan = new Scan();
    scan.setStartRow(selectedRange.getStart());
    scan.setStopRow(selectedRange.getStop());
    scan.setCaching(rawScan.getCaching());
    scan.setCacheBlocks(rawScan.getCacheBlocks());
    scan.setFilter(new ScanRange.ScanRangeList(list).toFilterList());
    Table table = conn.getTable(
        relation.getIndexTableName(selectedRange.getFamily(), selectedRange.getQualifier()));
    ResultScanner scanner = table.getScanner(scan);
    return new GCScanner(this, scanner, selectedRange.getFamily(), selectedRange.getQualifier());
}
Example 10: processGet
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
@Override
protected Result processGet(Table table, Get get) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
    scan.setReversed(true);
    scan.setStartRow(get.getRow());
    scan.setCacheBlocks(false);
    scan.setCaching(1);
    scan.setSmall(true);
    ResultScanner scanner = table.getScanner(scan);
    Result ret = scanner.next();
    scanner.close();
    return ret;
}
Example 11: createScanOnMemstore
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * Create a scan on the memstore.
 */
private Scan createScanOnMemstore(Scan rawScan, ScanRange.ScanRangeList allRangeList)
        throws IOException {
    Scan scan = new Scan(rawScan);
    scan.setCacheBlocks(false);
    scan.setFilter(allRangeList.toFilterList());
    return scan;
}
Example 12: fullScanAndCount
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private int fullScanAndCount(final TableName tableName, final ScanInjector injector)
        throws Exception {
    Table table = UTIL.getConnection().getTable(tableName);
    int count = 0;
    try {
        Scan scan = new Scan();
        scan.setCaching(1);
        scan.setCacheBlocks(false);
        injector.beforeScan(table, scan);
        ResultScanner scanner = table.getScanner(scan);
        try {
            while (true) {
                injector.beforeScanNext(table);
                Result result = scanner.next();
                injector.afterScanNext(table, result);
                if (result == null) break;
                if ((count++ % (ROW_PER_FILE / 2)) == 0) {
                    LOG.debug("scan next " + count);
                }
            }
        } finally {
            scanner.close();
            injector.afterScan(table);
        }
    } finally {
        table.close();
    }
    return count;
}
Example 13: getScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private Scan getScan() {
    Scan scan = new Scan(); // default scan settings
    scan.setCacheBlocks(false);
    scan.setMaxVersions(1);
    scan.setScanMetricsEnabled(true);
    if (caching != null) {
        scan.setCaching(Integer.parseInt(caching));
    }
    return scan;
}
Example 14: testReverseScanWithPadding
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
@Test
public void testReverseScanWithPadding() throws Exception {
    byte[] terminator = new byte[] { -1 };
    byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
    byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
    byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
    Put put1 = new Put(row1);
    put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put2 = new Put(row2);
    put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put3 = new Put(row3);
    put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    region.put(put1);
    region.put(put2);
    region.put(put3);
    region.flush(true);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setReversed(true);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.addFamily(cfName);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> res = new ArrayList<Cell>();
    int count = 1;
    while (scanner.next(res)) {
        count++;
    }
    assertEquals(3, count);
}
Example 15: runCheck
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * After adding data to the table, start a MapReduce job that checks the linked list chains.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
    LOG.info("Running check");
    Configuration conf = getConf();
    String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
    Path p = util.getDataTestDirOnTestFS(jobName);
    Job job = new Job(conf);
    job.setJarByClass(getClass());
    job.setJobName(jobName);
    job.setPartitionerClass(NaturalKeyPartitioner.class);
    job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
    job.setSortComparatorClass(CompositeKeyComparator.class);
    Scan scan = new Scan();
    scan.addFamily(CHAIN_FAM);
    scan.addFamily(SORT_FAM);
    scan.setMaxVersions(1);
    scan.setCacheBlocks(false);
    scan.setBatch(1000);
    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
    if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
        scan.setConsistency(Consistency.TIMELINE);
    }
    TableMapReduceUtil.initTableMapperJob(
        getTablename().getName(),
        scan,
        LinkedListCheckingMapper.class,
        LinkKey.class,
        LinkChain.class,
        job
    );
    job.setReducerClass(LinkedListCheckingReducer.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    FileOutputFormat.setOutputPath(job, p);
    assertEquals(true, job.waitForCompletion(true));
    // Delete the files.
    util.getTestFileSystem().delete(p, true);
}