This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.metrics.ScanMetrics. If you are wondering what the ScanMetrics class is for, or how to use it, the curated examples below may help.
The ScanMetrics class belongs to the org.apache.hadoop.hbase.client.metrics package. Fifteen code examples of the class are shown below, sorted by popularity by default.
Example 1: ClientSideRegionScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
    throws IOException {
  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<Cell>();
  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
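The constructor above opens the HRegion directly from files on the filesystem, so no RegionServer is involved. The following is a minimal usage sketch, not taken from the original project: the rootDir is assumed to point at a restored snapshot layout, and the HTableDescriptor/HRegionInfo are assumed to come from the snapshot manifest, much as TableSnapshotScanner arranges internally.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

// Sketch: scan one region directly from files under rootDir and return the metrics.
static ScanMetrics scanRegionFromFiles(Configuration conf, Path rootDir,
    HTableDescriptor htd, HRegionInfo hri) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  ScanMetrics metrics = new ScanMetrics();
  ClientSideRegionScanner scanner =
      new ClientSideRegionScanner(conf, fs, rootDir, htd, hri, new Scan(), metrics);
  try {
    while (scanner.next() != null) {
      // consume every row; counters in `metrics` are updated as the scan advances
    }
  } finally {
    scanner.close(); // closes both the internal region scanner and the region
  }
  return metrics;
}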
Example 2: updateCounters
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts,
    Method getCounter, TaskAttemptContext context, long numStale) {
  // we can get access to counters only if hbase uses new mapreduce APIs
  if (getCounter == null) {
    return;
  }
  try {
    for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
      Counter ct = (Counter) getCounter.invoke(context,
          HBASE_COUNTER_GROUP_NAME, entry.getKey());
      ct.increment(entry.getValue());
    }
    ((Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
        "NUM_SCANNER_RESTARTS")).increment(numScannerRestarts);
    ((Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
        "NUM_SCAN_RESULTS_STALE")).increment(numStale);
  } catch (Exception e) {
    LOG.debug("can't update counter." + StringUtils.stringifyException(e));
  }
}
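The `getCounter` argument is a reflectively resolved handle, which lets the same code run on Hadoop versions with and without the newer mapreduce counter API. Below is a hedged sketch of how such a handle can be obtained; HBase's own TableRecordReaderImpl does something very similar.
import java.lang.reflect.Method;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Look up TaskAttemptContext.getCounter(String, String) reflectively; callers pass
// the resulting Method (or null) into updateCounters() above.
static Method retrieveGetCounterMethod(TaskAttemptContext context) {
  try {
    return context.getClass().getMethod("getCounter", String.class, String.class);
  } catch (NoSuchMethodException e) {
    return null; // updateCounters() then skips counter updates silently
  }
}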
Example 3: testMetric
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
/**
 * Run the scan to completion and check the metric against the specified value
 * @param scan
 * @param metricKey
 * @param expectedValue
 * @throws Exception
 */
public void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception {
  assertTrue("Scan should be configured to record metrics", scan.isScanMetricsEnabled());
  ResultScanner scanner = TABLE.getScanner(scan);
  // Iterate through all the results
  for (Result r : scanner) {
  }
  scanner.close();
  ScanMetrics metrics = scan.getScanMetrics();
  assertTrue("Metrics are null", metrics != null);
  assertTrue("Metric : " + metricKey + " does not exist", metrics.hasCounter(metricKey));
  final long actualMetricValue = metrics.getCounter(metricKey).get();
  assertEquals("Metric: " + metricKey + " Expected: " + expectedValue + " Actual: "
      + actualMetricValue, expectedValue, actualMetricValue);
}
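For the first assertion to pass, metrics collection must be enabled on the Scan before the scanner is opened. A minimal caller might look like the sketch below; FAMILY and QUALIFIER are hypothetical names, and the expected value depends entirely on the workload and HBase version.
Scan scan = new Scan();
scan.setScanMetricsEnabled(true);        // required, or the assertTrue above fails
scan.addColumn(FAMILY, QUALIFIER);       // FAMILY/QUALIFIER assumed defined in the test class
testMetric(scan, "REGIONS_SCANNED", 1);  // counter names vary across HBase versions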
Example 4: toScanMetrics
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public static ScanMetrics toScanMetrics(final byte[] bytes) {
  Parser<MapReduceProtos.ScanMetrics> parser = MapReduceProtos.ScanMetrics.PARSER;
  MapReduceProtos.ScanMetrics pScanMetrics = null;
  try {
    pScanMetrics = parser.parseFrom(bytes);
  } catch (InvalidProtocolBufferException e) {
    // Ignored: there are just no key values to add.
  }
  ScanMetrics scanMetrics = new ScanMetrics();
  if (pScanMetrics != null) {
    for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) {
      if (pair.hasName() && pair.hasValue()) {
        scanMetrics.setCounter(pair.getName(), pair.getValue());
      }
    }
  }
  return scanMetrics;
}
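For reference, the inverse direction can be sketched by building the same protobuf message by hand. This round trip is only an illustration; the counter name and value are arbitrary.
MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder();
builder.addMetrics(HBaseProtos.NameInt64Pair.newBuilder()
    .setName("RPC_CALLS").setValue(42L).build());
byte[] bytes = builder.build().toByteArray();

ScanMetrics restored = toScanMetrics(bytes); // the method shown above
assert restored.getCounter("RPC_CALLS").get() == 42L;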
Example 5: ClientSideRegionScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
    throws IOException {
  this.scan = scan;
  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<Cell>();
  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
Example 6: updateCounters
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts,
    Method getCounter, TaskAttemptContext context) {
  // we can get access to counters only if hbase uses new mapreduce APIs
  if (getCounter == null) {
    return;
  }
  try {
    for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
      Counter ct = (Counter) getCounter.invoke(context,
          HBASE_COUNTER_GROUP_NAME, entry.getKey());
      ct.increment(entry.getValue());
    }
    ((Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
        "NUM_SCANNER_RESTARTS")).increment(numScannerRestarts);
  } catch (Exception e) {
    LOG.debug("can't update counter." + StringUtils.stringifyException(e));
  }
}
Example 7: ClientSideRegionScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics)
    throws IOException {
  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  htd = TableDescriptorBuilder.newBuilder(htd).setReadOnly(true).build();
  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<>();
  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
Example 8: testMetric
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
/**
 * Run the scan to completion and check the metric against the specified value
 * @param scan
 * @param metricKey
 * @param expectedValue
 * @throws Exception
 */
public void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception {
  assertTrue("Scan should be configured to record metrics", scan.isScanMetricsEnabled());
  ResultScanner scanner = TABLE.getScanner(scan);
  // Iterate through all the results
  while (scanner.next() != null) {
  }
  scanner.close();
  ScanMetrics metrics = scanner.getScanMetrics();
  assertTrue("Metrics are null", metrics != null);
  assertTrue("Metric : " + metricKey + " does not exist", metrics.hasCounter(metricKey));
  final long actualMetricValue = metrics.getCounter(metricKey).get();
  assertEquals("Metric: " + metricKey + " Expected: " + expectedValue + " Actual: "
      + actualMetricValue, expectedValue, actualMetricValue);
}
Example 9: toScanMetrics
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public static ScanMetrics toScanMetrics(final byte[] bytes) {
  MapReduceProtos.ScanMetrics pScanMetrics = null;
  try {
    pScanMetrics = MapReduceProtos.ScanMetrics.parseFrom(bytes);
  } catch (InvalidProtocolBufferException e) {
    // Ignored: there are just no key values to add.
  }
  ScanMetrics scanMetrics = new ScanMetrics();
  if (pScanMetrics != null) {
    for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) {
      if (pair.hasName() && pair.hasValue()) {
        scanMetrics.setCounter(pair.getName(), pair.getValue());
      }
    }
  }
  return scanMetrics;
}
Example 10: AsyncClientScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableName tableName,
    AsyncConnectionImpl conn, long pauseNs, int maxAttempts, long scanTimeoutNs,
    long rpcTimeoutNs, int startLogErrorsCnt) {
  if (scan.getStartRow() == null) {
    scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow());
  }
  if (scan.getStopRow() == null) {
    scan.withStopRow(EMPTY_END_ROW, scan.includeStopRow());
  }
  this.scan = scan;
  this.consumer = consumer;
  this.tableName = tableName;
  this.conn = conn;
  this.pauseNs = pauseNs;
  this.maxAttempts = maxAttempts;
  this.scanTimeoutNs = scanTimeoutNs;
  this.rpcTimeoutNs = rpcTimeoutNs;
  this.startLogErrorsCnt = startLogErrorsCnt;
  this.resultCache = createScanResultCache(scan);
  if (scan.isScanMetricsEnabled()) {
    this.scanMetrics = new ScanMetrics();
    consumer.onScanMetricsCreated(scanMetrics);
  } else {
    this.scanMetrics = null;
  }
}
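On this async code path the ScanMetrics instance is handed to the consumer exactly once, right after creation; its counters then keep updating while the scan runs and are final by the time onComplete() fires. A minimal consumer sketch, assuming the HBase 2.x async client API:
// Pass the consumer to something like asyncConn.getTable(tableName).scan(scan, consumer).
AdvancedScanResultConsumer consumer = new AdvancedScanResultConsumer() {
  private volatile ScanMetrics scanMetrics;

  @Override
  public void onScanMetricsCreated(ScanMetrics scanMetrics) {
    this.scanMetrics = scanMetrics; // called once, before any results are delivered
  }

  @Override
  public void onNext(Result[] results, ScanController controller) {
    // process the batch of results
  }

  @Override
  public void onError(Throwable error) {
    // handle scan failure
  }

  @Override
  public void onComplete() {
    if (scanMetrics != null) {
      System.out.println("RPC calls: " + scanMetrics.countOfRPCcalls.get());
    }
  }
};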
Example 11: updateResultsMetrics
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
static void updateResultsMetrics(ScanMetrics scanMetrics, Result[] rrs,
    boolean isRegionServerRemote) {
  if (scanMetrics == null || rrs == null || rrs.length == 0) {
    return;
  }
  long resultSize = 0;
  for (Result rr : rrs) {
    for (Cell cell : rr.rawCells()) {
      resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
    }
  }
  scanMetrics.countOfBytesInResults.addAndGet(resultSize);
  if (isRegionServerRemote) {
    scanMetrics.countOfBytesInRemoteResults.addAndGet(resultSize);
  }
}
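The byte counters updated here land in the same per-scan map as every other counter, so they can be inspected generically. A hedged sketch follows; note that on many HBase versions getMetricsMap() also resets the counters to zero as a side effect.
import java.util.Map;

// Dump whatever counters a ScanMetrics instance has accumulated so far.
static void dumpScanMetrics(ScanMetrics scanMetrics) {
  for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
    System.out.println(entry.getKey() + " = " + entry.getValue());
  }
}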
Example 12: closeScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
private void closeScanner() {
  if (logger.isDebugEnabled() && scan != null) {
    logger.debug("Scan " + scan.toString());
    byte[] metricsBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    if (metricsBytes != null) {
      ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(metricsBytes);
      logger.debug("HBase Metrics: "
          + "count={}, ms={}, bytes={}, remote_bytes={}, regions={}, not_serving_region={}, "
          + "rpc={}, rpc_retries={}, remote_rpc={}, remote_rpc_retries={}",
          new Object[] { scanCount, scanMetrics.sumOfMillisSecBetweenNexts,
              scanMetrics.countOfBytesInResults, scanMetrics.countOfBytesInRemoteResults,
              scanMetrics.countOfRegions, scanMetrics.countOfNSRE,
              scanMetrics.countOfRPCcalls, scanMetrics.countOfRPCRetries,
              scanMetrics.countOfRemoteRPCcalls, scanMetrics.countOfRemoteRPCRetries });
    }
  }
  try {
    if (scanner != null) {
      scanner.close();
      scanner = null;
    }
  } catch (Throwable t) {
    throw new StorageException("Error when closing scanner for table " + tableName, t);
  }
}
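The SCAN_ATTRIBUTES_METRICS_DATA attribute read above is only populated if metrics collection was switched on before the scan started. A hedged sketch of the older, attribute-based way to enable it (newer clients use Scan.setScanMetricsEnabled(true) instead):
Scan scan = new Scan();
scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE));
// ... run the scan to completion and close the scanner, then:
byte[] metricsBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
if (metricsBytes != null) {
  ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(metricsBytes);
  System.out.println("RPC calls: " + scanMetrics.countOfRPCcalls.get());
}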
Example 13: ClientSideRegionScanner
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
    throws IOException {
  this.scan = scan;
  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<Cell>();
  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
Example 14: toScanMetrics
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
public static ScanMetrics toScanMetrics(final byte[] bytes) {
  MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder();
  try {
    builder.mergeFrom(bytes);
  } catch (InvalidProtocolBufferException e) {
    // Ignored: there are just no key values to add.
  }
  MapReduceProtos.ScanMetrics pScanMetrics = builder.build();
  ScanMetrics scanMetrics = new ScanMetrics();
  for (HBaseProtos.NameInt64Pair pair : pScanMetrics.getMetricsList()) {
    if (pair.hasName() && pair.hasValue()) {
      scanMetrics.setCounter(pair.getName(), pair.getValue());
    }
  }
  return scanMetrics;
}
Example 15: nextKeyValue
import org.apache.hadoop.hbase.client.metrics.ScanMetrics; // import the required package/class
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  boolean result = delegate.nextKeyValue();
  if (result) {
    ScanMetrics scanMetrics = delegate.getScanner().getScanMetrics();
    if (scanMetrics != null && context != null) {
      TableRecordReaderImpl.updateCounters(scanMetrics, 0, getCounter, context, 0);
    }
  }
  return result;
}