本文整理汇总了Java中org.apache.hadoop.metrics.MetricsUtil.createRecord方法的典型用法代码示例。如果您正苦于以下问题:Java MetricsUtil.createRecord方法的具体用法?Java MetricsUtil.createRecord怎么用?Java MetricsUtil.createRecord使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.metrics.MetricsUtil
的用法示例。
在下文中一共展示了MetricsUtil.createRecord方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: RegionServerDynamicMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Wires up the dynamic per-region metrics record for a region server and
 * registers this instance as a periodic updater on the "hbase" context.
 *
 * @param regionServer the region server whose dynamic statistics are exported
 */
private RegionServerDynamicMetrics(HRegionServer regionServer) {
  this.context = MetricsUtil.getContext("hbase");
  this.metricsRecord =
      MetricsUtil.createRecord(this.context, "RegionServerDynamicStatistics");
  context.registerUpdater(this);
  this.rsDynamicStatistics = new RegionServerDynamicStatistics(this.registry);
  this.regionServer = regionServer;
  // The superclass keeps updateMbeanInfoIfMetricsListChanged non-public, so
  // reach it via reflection and open it up; failure is logged, not fatal.
  try {
    Class<?> statsSuper = this.rsDynamicStatistics.getClass().getSuperclass();
    updateMbeanInfoIfMetricsListChanged = statsSuper.getDeclaredMethod(
        "updateMbeanInfoIfMetricsListChanged", new Class[]{});
    updateMbeanInfoIfMetricsListChanged.setAccessible(true);
  } catch (Exception e) {
    LOG.error(e);
  }
}
示例2: RegionServerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Builds the "regionserver" metrics record on the "hbase" context, tags it
 * with the current thread name, registers this object as an updater, and
 * initializes JVM / HBase info metrics plus the JMX statistics bean.
 *
 * Fixes: the caught IOException was logged without its cause, and a
 * malformed "hbase.extendedperiod" value would throw an uncaught
 * NumberFormatException out of the constructor.
 */
public RegionServerMetrics() {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "regionserver");
  String name = Thread.currentThread().getName();
  metricsRecord.setTag("RegionServer", name);
  context.registerUpdater(this);
  // Add jvmmetrics.
  JvmMetrics.init("RegionServer", name);
  // Add Hbase Info metrics
  HBaseInfo.init();
  // export for JMX
  statistics = new RegionServerStatistics(this.registry, name);
  // get custom attributes
  try {
    Object m = ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
    if (m instanceof String) {
      // attribute is seconds; extendedPeriod is kept in milliseconds
      this.extendedPeriod = Long.parseLong((String) m) * 1000;
    }
  } catch (IOException ioe) {
    // preserve the cause instead of silently dropping it
    LOG.info("Couldn't load ContextFactory for Metrics config info", ioe);
  } catch (NumberFormatException nfe) {
    // a bad hbase.extendedperiod value must not abort construction
    LOG.info("Invalid hbase.extendedperiod value; keeping default", nfe);
  }
  LOG.info("Initialized");
}
示例3: HBaseRpcMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the RPC "metrics" record on the "rpc" context, tags it with the
 * server port, registers per-method metrics for the HBase RPC interfaces,
 * and exports the statistics bean for JMX.
 *
 * @param hostName host name used for the JMX statistics bean
 * @param port RPC port this server listens on (tag value)
 */
public HBaseRpcMetrics(String hostName, String port) {
  MetricsContext context = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(context, "metrics");
  metricsRecord.setTag("port", port);

  LOG.info("Initializing RPC Metrics with hostName="
      + hostName + ", port=" + port);

  context.registerUpdater(this);

  // One metric per RPC method on each exported interface.
  initMethods(HMasterInterface.class);
  initMethods(HMasterRegionInterface.class);
  initMethods(HRegionInterface.class);
  rpcStatistics = new HBaseRPCStatistics(this.registry, hostName, port);
}
示例4: ClusterManagerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Constructor. Builds the cluster-manager metrics record and one
 * counter/value map per resource lifecycle stage, plus node- and
 * session-level gauges.
 *
 * @param types The available resource types.
 */
public ClusterManagerMetrics(Collection<ResourceType> types) {
  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);

  // Per-resource-type counters keyed by lifecycle stage.
  typeToResourceRequested = createTypeToResourceCountMap(types, "requested");
  typeToResourceGranted = createTypeToResourceCountMap(types, "granted");
  typeToResourceRevoked = createTypeToResourceCountMap(types, "revoked");
  typeToResourceReleased = createTypeToResourceCountMap(types, "released");

  // Per-resource-type scheduler state gauges.
  typeToPendingCount = createTypeToCountMap(types, "pending");
  typeToRunningCount = createTypeToCountMap(types, "running");
  typeToTotalSlots = createTypeToCountMap(types, "total");
  typeToFreeSlots = createTypeToCountMap(types, "free");
  typeToSchedulerRunTime = createTypeToCountMap(types, "scheduler_runtime");
  sessionStatusToMetrics = createSessionStatusToMetricsMap();

  // Cluster-wide node and session gauges.
  aliveNodes = new MetricsIntValue("alive_nodes", registry);
  deadNodes = new MetricsIntValue("dead_nodes", registry);
  blacklistedNodes = new MetricsIntValue("blacklisted_nodes", registry);
  numRunningSessions = new MetricsIntValue("num_running_sessions", registry);
  totalSessionCount = new MetricsTimeVaryingInt("total_sessions", registry);
  pendingCallsCount = new MetricsIntValue("num_pending_calls", registry);
  numCJTFailures = new MetricsTimeVaryingInt("num_cjt_failures", registry);
}
示例5: JvmMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates a new instance of JvmMetrics. Builds the record on the "jvm"
 * context, tags it with process name and session id, and registers this
 * object so the context polls it for updates.
 */
private JvmMetrics(String processName, String sessionId, String recordName) {
  MetricsContext jvmContext = MetricsUtil.getContext("jvm");
  metrics = MetricsUtil.createRecord(jvmContext, recordName);
  metrics.setTag("processName", processName);
  metrics.setTag("sessionId", sessionId);
  jvmContext.registerUpdater(this);
}
示例6: LocalJobRunnerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up map-reduce metrics for the local job runner: initializes JVM
 * metrics, then creates a "jobtracker" record (name kept for compatibility)
 * on the "mapred" context tagged with the session id.
 *
 * @param conf job configuration supplying the session id
 */
public LocalJobRunnerMetrics(JobConf conf) {
  String sessionId = conf.getSessionId();
  // Initiate JVM Metrics
  JvmMetrics.init("JobTracker", sessionId);
  // Create a record for map-reduce metrics;
  // record name is jobtracker for compatibility
  MetricsContext mrContext = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(mrContext, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  mrContext.registerUpdater(this);
}
示例7: ShuffleClientMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Builds the "shuffleInput" record for a reduce task's shuffle phase and
 * tags it with user, job, and task identifiers so copies can be attributed.
 *
 * @param reduceId attempt id of the reduce task doing the shuffle
 * @param jobConf configuration supplying copier count and tag values
 */
ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
  this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);

  MetricsContext context = MetricsUtil.getContext("mapred");
  this.shuffleMetrics = MetricsUtil.createRecord(context, "shuffleInput");

  // Identify whose shuffle this is.
  this.shuffleMetrics.setTag("user", jobConf.getUser());
  this.shuffleMetrics.setTag("jobName", jobConf.getJobName());
  this.shuffleMetrics.setTag("jobId", reduceId.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", reduceId.toString());
  this.shuffleMetrics.setTag("sessionId", jobConf.getSessionId());

  context.registerUpdater(this);
}
示例8: ReplicationSourceMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Constructor used to register the metrics. Builds the "replication" record
 * on the "hbase" context, tags it with this thread's name, and exports a
 * JMX statistics bean named after the (URL-encoded) source id.
 *
 * @param id Name of the source this class is monitoring
 */
public ReplicationSourceMetrics(String id) {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "replication");
  metricsRecord.setTag("RegionServer", Thread.currentThread().getName());
  context.registerUpdater(this);

  // Make the id safe for use in a JMX object name; fall back to a
  // sentinel if the encoding is unavailable.
  String encodedId;
  try {
    encodedId = URLEncoder.encode(id, "UTF8");
  } catch (UnsupportedEncodingException e) {
    encodedId = "CAN'T ENCODE UTF8";
  }
  // export for JMX
  replicationStatistics =
      new ReplicationStatistics(this.registry, "ReplicationSource for " + encodedId);
}
示例9: BalancerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Initializes balancer metrics: JVM metrics, the balancer activity MBean,
 * and the "balancer" record on the "dfs" context tagged with the session id.
 *
 * @param conf configuration supplying "session.id"
 * @param namenodeAddress address of the namenode being balanced (MBean only)
 */
public BalancerMetrics(Configuration conf, InetSocketAddress namenodeAddress) {
  String sessionId = conf.get("session.id");
  JvmMetrics.init("Balancer", sessionId);

  balancerActivityMBean =
      new BalancerActivityMBean(registry, conf, namenodeAddress);

  MetricsContext dfsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(dfsContext, "balancer");
  metricsRecord.setTag("sessionId", sessionId);
  dfsContext.registerUpdater(this);
}
示例10: ThriftMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the Thrift server metrics record, tags it with the listening
 * port, and registers a per-method metric for every method on the handler
 * interface.
 *
 * @param port Thrift server port (used as a tag)
 * @param conf configuration supplying the slow-response threshold
 * @param iface handler interface whose methods get individual metrics
 */
public ThriftMetrics(int port, Configuration conf, Class<?> iface) {
  slowResponseTime =
      conf.getLong(SLOW_RESPONSE_NANO_SEC, DEFAULT_SLOW_RESPONSE_NANO_SEC);

  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);
  metricsRecord.setTag("port", String.valueOf(port));

  LOG.info("Initializing RPC Metrics with port=" + port);

  context.registerUpdater(this);
  createMetricsForMethods(iface);
}
示例11: DataNodeMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Initializes datanode metrics: JVM metrics, the datanode activity MBean,
 * and the "datanode" record on the "dfs" context tagged with the session id.
 *
 * @param conf configuration supplying "session.id"
 * @param storageId storage id used to name the activity MBean
 */
public DataNodeMetrics(Configuration conf, String storageId) {
  String sessionId = conf.get("session.id");
  // Initiate reporting of Java VM metrics
  JvmMetrics.init("DataNode", sessionId);
  // Now the MBean for the data node
  datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId);
  // Create record for DataNode metrics
  MetricsContext dfsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(dfsContext, "datanode");
  metricsRecord.setTag("sessionId", sessionId);
  dfsContext.registerUpdater(this);
}
示例12: ShuffleServerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Creates the "shuffleOutput" record on the "mapred" context for the
 * server side of the shuffle, tagged with the job's session id.
 *
 * @param conf job configuration supplying the session id
 */
ShuffleServerMetrics(JobConf conf) {
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  shuffleMetricsRecord = MetricsUtil.createRecord(mapredContext, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  mapredContext.registerUpdater(this);
}
示例13: ShuffleServerMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Sets up server-side shuffle metrics: a "shuffleOutput" record on the
 * "mapred" context, tagged with the session id, polled via this updater.
 *
 * @param conf job configuration supplying the session id
 */
ShuffleServerMetrics(JobConf conf) {
  MetricsContext context = MetricsUtil.getContext("mapred");
  this.shuffleMetricsRecord =
      MetricsUtil.createRecord(context, "shuffleOutput");
  shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  context.registerUpdater(this);
}
示例14: TaskTrackerMetricsInst
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Instruments a task tracker: initializes JVM metrics and creates the
 * "tasktracker" record on the "mapred" context tagged with the session id.
 *
 * @param t the task tracker being instrumented (passed to the superclass)
 */
public TaskTrackerMetricsInst(TaskTracker t) {
  super(t);
  JobConf conf = tt.getJobConf();
  extraJvms = conf.getInt(EXTRA_JVMS, 16);
  String sessionId = conf.getSessionId();
  // Initiate Java VM Metrics
  JvmMetrics.init("TaskTracker", sessionId);
  // Create a record for Task Tracker metrics
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  // createRecord is guaranteed never to return null
  metricsRecord = MetricsUtil.createRecord(mapredContext, "tasktracker");
  metricsRecord.setTag("sessionId", sessionId);
  mapredContext.registerUpdater(this);
}
示例15: ReplicationSinkMetrics
import org.apache.hadoop.metrics.MetricsUtil; //导入方法依赖的package包/类
/**
 * Constructor used to register the metrics. Builds the "replication"
 * record on the "hbase" context, tags it with this thread's name, and
 * exports a "ReplicationSink" statistics bean for JMX.
 */
public ReplicationSinkMetrics() {
  MetricsContext context = MetricsUtil.getContext("hbase");
  metricsRecord = MetricsUtil.createRecord(context, "replication");
  metricsRecord.setTag("RegionServer", Thread.currentThread().getName());
  context.registerUpdater(this);
  // export for JMX
  new ReplicationStatistics(this.registry, "ReplicationSink");
}