This article collects typical usage examples of the Java class org.apache.hadoop.metrics2.source.JvmMetrics. If you are wondering what the JvmMetrics class does, how to use it, or where to find working examples, the curated class examples below may help.
The JvmMetrics class belongs to the org.apache.hadoop.metrics2.source package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
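
Before the examples, here is a minimal, self-contained sketch of the lifecycle they all follow: initialize the default metrics system under a process name, then register the JVM metrics source. The process name "MyDaemon" and the null session ID are illustrative placeholders, not values taken from the examples.

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class JvmMetricsQuickStart {
  public static void main(String[] args) {
    // Initialize the shared metrics system for this process.
    DefaultMetricsSystem.initialize("MyDaemon");
    // Register the singleton JVM metrics source (memory, GC, threads, logging).
    JvmMetrics jm = JvmMetrics.initSingleton("MyDaemon", null);
    // ... run the daemon; configured sinks report the metrics periodically ...
    DefaultMetricsSystem.shutdown();
  }
}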
Example 1: serviceInit
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);
  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);
  // register JVM metrics under the server's process name
  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
Example 2: Nfs3Metrics
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);
  final int len = intervals.length;
  readNanosQuantiles = new MutableQuantiles[len];
  writeNanosQuantiles = new MutableQuantiles[len];
  commitNanosQuantiles = new MutableQuantiles[len];
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
        + interval + "s", "Read process in ns", "ops", "latency", interval);
    writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
        + interval + "s", "Write process in ns", "ops", "latency", interval);
    commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
        + interval + "s", "Commit process in ns", "ops", "latency", interval);
  }
}
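
The constructor above illustrates a recurring Hadoop metrics pattern: one MutableQuantiles per configured interval, with the window length in seconds embedded in the metric name. The following standalone sketch isolates that pattern; the registry name, metric names, and interval values are illustrative assumptions, not taken from the example.

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

public class QuantilesSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("QuantilesSketch");
    int[] intervals = {60, 300}; // 1-minute and 5-minute rolling windows
    MutableQuantiles[] latencyQuantiles = new MutableQuantiles[intervals.length];
    for (int i = 0; i < intervals.length; i++) {
      // Name encodes the window, e.g. "opLatency60s", as in Nfs3Metrics above.
      latencyQuantiles[i] = registry.newQuantiles("opLatency" + intervals[i] + "s",
          "Operation latency", "ops", "latency", intervals[i]);
    }
    // Each sample is recorded into every window.
    for (MutableQuantiles q : latencyQuantiles) {
      q.add(42L);
    }
  }
}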
Example 3: NameNodeMetrics
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);
  final int len = intervals.length;
  syncsQuantiles = new MutableQuantiles[len];
  blockReportQuantiles = new MutableQuantiles[len];
  cacheReportQuantiles = new MutableQuantiles[len];
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    syncsQuantiles[i] = registry.newQuantiles(
        "syncs" + interval + "s",
        "Journal syncs", "ops", "latency", interval);
    blockReportQuantiles[i] = registry.newQuantiles(
        "blockReport" + interval + "s",
        "Block report", "ops", "latency", interval);
    cacheReportQuantiles[i] = registry.newQuantiles(
        "cacheReport" + interval + "s",
        "Cache report", "ops", "latency", interval);
  }
}
Example 4: start
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  validateAndCreateJournalDir(localDir);
  // register JVM metrics before bringing up the HTTP and RPC servers
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  registerJNMXBean();
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();
  httpServerURI = httpServer.getServerURI().toString();
  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
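
Examples 4 and 7–9 use the JvmMetrics.create(processName, sessionId, ms) factory instead of initSingleton, passing a session ID read from configuration. A stripped-down sketch of just that registration step, assuming "dfs.metrics.session-id" is the value of DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class CreateFactorySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    DefaultMetricsSystem.initialize("JournalNode");
    MetricsSystem ms = DefaultMetricsSystem.instance();
    // The session ID may be null when the key is unset; JvmMetrics accepts that.
    JvmMetrics.create("JournalNode", conf.get("dfs.metrics.session-id"), ms);
  }
}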
Example 5: serviceInit
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);
  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);
  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics jm = JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  // wire a pause monitor into the JVM metrics source (see the sketch below)
  pauseMonitor = new JvmPauseMonitor();
  addService(pauseMonitor);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(conf);
}
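
Example 5 extends Example 1 by attaching a JvmPauseMonitor, so GC pauses are reported alongside the other JVM metrics. A minimal sketch of that wiring outside the YARN service lifecycle follows. Note that JvmPauseMonitor's API changed across Hadoop versions (a plain class in older branches, a Hadoop Service in the branch these examples come from), so the init/start calls below are an assumption tied to the service-based version; "MyDaemon" is an illustrative process name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.util.JvmPauseMonitor;

public class PauseMonitorSketch {
  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("MyDaemon");
    JvmMetrics jm = JvmMetrics.initSingleton("MyDaemon", null);
    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor();
    pauseMonitor.init(new Configuration()); // Service lifecycle: init, then start
    pauseMonitor.start();
    // Pause counts now show up in the JVM metrics record.
    jm.setPauseMonitor(pauseMonitor);
    // ... run the daemon ...
    pauseMonitor.stop();
    DefaultMetricsSystem.shutdown();
  }
}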
Example 6: Nfs3Metrics
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);
  final int len = intervals.length;
  readNanosQuantiles = new MutableQuantiles[len];
  writeNanosQuantiles = new MutableQuantiles[len];
  commitNanosQuantiles = new MutableQuantiles[len];
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
        + interval + "s", "Read process in ns", "ops", "latency", interval);
    writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
        + interval + "s", "Write process in ns", "ops", "latency", interval);
    commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
        + interval + "s", "Commit process in ns", "ops", "latency", interval);
  }
}
Example 7: start
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  validateAndCreateJournalDir(localDir);
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();
  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 8: start
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  validateAndCreateJournalDir(localDir);
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
  registerJNMXBean();
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();
  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 9: start
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  validateAndCreateJournalDir(localDir);
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
  registerJNMXBean();
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();
  httpServerURI = httpServer.getServerURI().toString();
  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 10: serviceInit
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.store = createSCMStoreService(conf);
  addService(store);
  CleanerService cs = createCleanerService(store);
  addService(cs);
  SharedCacheUploaderService nms =
      createNMCacheUploaderSCMProtocolService(store);
  addService(nms);
  ClientProtocolService cps = createClientProtocolService(store);
  addService(cps);
  SCMAdminProtocolService saps = createSCMAdminProtocolService(cs);
  addService(saps);
  SCMWebServer webUI = createSCMWebServer(this);
  addService(webUI);
  // init metrics
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);
  super.serviceInit(conf);
}
Example 11: create
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
  // Percentile measurement is [50th,75th,90th,95th,99th] currently
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
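
The factory above only creates quantiles when percentile intervals are configured; conf.getInts(...) returns an empty array otherwise, so the loop in the Nfs3Metrics constructor never runs. A small sketch of supplying intervals through Configuration, assuming "nfs.metrics.percentiles.intervals" is the value of NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class IntervalsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Two rolling windows: 1 minute and 5 minutes (values are illustrative).
    conf.setStrings("nfs.metrics.percentiles.intervals", "60", "300");
    int[] intervals = conf.getInts("nfs.metrics.percentiles.intervals");
    System.out.println(Arrays.toString(intervals)); // prints [60, 300]
  }
}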
Example 12: create
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new NameNodeMetrics(processName, sessionId,
      intervals, jm));
}
Example 13: create
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-" + (dnName.isEmpty()
      ? "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt()
      : dnName.replace(':', '-'));
  // Percentile measurement is off by default, by watching no intervals
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
Example 14: serviceInit
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  doSecureLogin(conf);
  proxy = new WebAppProxy();
  addService(proxy);
  DefaultMetricsSystem.initialize("WebAppProxyServer");
  JvmMetrics jm = JvmMetrics.initSingleton("WebAppProxyServer", null);
  pauseMonitor = new JvmPauseMonitor();
  addService(pauseMonitor);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(config);
}
Example 15: serviceInit
import org.apache.hadoop.metrics2.source.JvmMetrics; // import the required package/class

@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
  // This is required for WebApps to use https if enabled.
  MRWebAppUtil.initialize(getConfig());
  try {
    doSecureLogin(conf);
  } catch (IOException ie) {
    throw new YarnRuntimeException("History Server Failed to login", ie);
  }
  jobHistoryService = new JobHistory();
  historyContext = (HistoryContext) jobHistoryService;
  stateStore = createStateStore(conf);
  this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
  clientService = createHistoryClientService();
  aggLogDelService = new AggregatedLogDeletionService();
  hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
  addService(stateStore);
  addService(new HistoryServerSecretManagerService());
  addService(jobHistoryService);
  addService(clientService);
  addService(aggLogDelService);
  addService(hsAdminServer);
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null);
  pauseMonitor = new JvmPauseMonitor();
  addService(pauseMonitor);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(config);
}