本文整理汇总了Python中cm_api.api_client.ApiResource.query_timeseries方法的典型用法代码示例。如果您正苦于以下问题:Python ApiResource.query_timeseries方法的具体用法?Python ApiResource.query_timeseries怎么用?Python ApiResource.query_timeseries使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cm_api.api_client.ApiResource
的用法示例。
在下文中一共展示了ApiResource.query_timeseries方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TimeSeriesQuery
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import query_timeseries [as 别名]
class TimeSeriesQuery(object):
    """Thin wrapper around a Cloudera Manager ApiResource for running
    time-series (tsquery) queries against the CM API.

    Connection settings are taken from the module-level CM_HOST, CM_USER,
    CM_PASSWD and CM_USE_TLS constants.
    """

    def __init__(self):
        # Build the API client once; every query() call reuses it.
        self._api = ApiResource(CM_HOST,
                                username=CM_USER,
                                password=CM_PASSWD,
                                use_tls=CM_USE_TLS)

    def query(self, query, from_time, to_time):
        """Run `query` (a tsquery string) over the [from_time, to_time] window
        and return the raw API response."""
        client = self._api
        return client.query_timeseries(query, from_time, to_time)
示例2: do_call
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import query_timeseries [as 别名]
def do_call(user, password, man_host, man_port, nav_host, nav_port, app_name, app_version, app_namespace, app_time, app_start, app_end,
            app_dashboard,
            app_report_only):
    """Aggregate CPU/HDFS/network usage for a benchmark run from Cloudera
    Manager time-series data, publish it as metadata (via update_metadata),
    and print a grid report comparing this run with previously recorded runs.

    NOTE(review): this is Python 2 code (print statement, urllib.quote_plus);
    the snippet's original indentation was lost and has been reconstructed
    from the statement semantics — confirm nesting against the upstream file.

    Args:
        user, password: credentials for both Cloudera Manager and Navigator.
        man_host, man_port: Cloudera Manager endpoint.
        nav_host, nav_port: Navigator endpoint, consumed by update_metadata.
        app_name, app_version, app_namespace: identity of the benchmarked app.
        app_time: run duration in seconds, as a string.
        app_start, app_end: epoch-seconds strings bounding the run window.
        app_dashboard: path to a JSON dashboard definition file.
        app_report_only: when truthy, skip dashboard creation and metric
            collection; only print the report from existing metadata.
    """
    # Relative usage accumulators, filled from the dashboard's tsqueries.
    cpu = 0
    hdfs = 0
    network = 0
    if app_report_only:
        # No live window to query; zero out the time bounds.
        app_start = '0'
        app_end = '0'
    dashboard_name = 'Release (' + app_namespace + ')'
    if not app_report_only:
        api = ApiResource(man_host, man_port, user, password, False, MAN_API_VERSION)
        with open(app_dashboard, 'r') as dashboard_data_file:
            dashboard_data = dashboard_data_file.read()
        try:
            create_dashboards(api, [ApiDashboard(api, dashboard_name, dashboard_data)])
        except ApiException:
            # Dashboard presumably already exists; best-effort create.
            pass
        # Run every tsquery found in the dashboard over the benchmark window
        # and fold the metrics of interest into the accumulators.
        for view_plot in json.loads(dashboard_data)['viewPlots']:
            for key, value in view_plot['plot'].items():
                if key == 'tsquery':
                    for time_series in \
                            api.query_timeseries(value, datetime.datetime.fromtimestamp(float(app_start)),
                                                 datetime.datetime.fromtimestamp(float(app_end)))[
                                0].timeSeries:
                        if time_series.metadata.metricName == 'cpu_percent_across_hosts':
                            cpu = compress_bins(time_series.data, 1)
                        # HDFS usage sums the read and write rates, scaled down.
                        if time_series.metadata.metricName == 'total_bytes_read_rate_across_datanodes':
                            hdfs += compress_bins(time_series.data, 100000)
                        if time_series.metadata.metricName == 'total_bytes_written_rate_across_datanodes':
                            hdfs += compress_bins(time_series.data, 100000)
                        # Network usage sums the receive and transmit rates.
                        if time_series.metadata.metricName == 'total_bytes_receive_rate_across_network_interfaces':
                            network += compress_bins(time_series.data, 100000)
                        if time_series.metadata.metricName == 'total_bytes_transmit_rate_across_network_interfaces':
                            network += compress_bins(time_series.data, 100000)
    # Metadata properties to publish; appending '000' converts the
    # epoch-seconds strings to epoch-milliseconds strings.
    properties = [ \
        {'name': 'Name', 'description': 'Application name', 'value': {'Name': [app_name]}}, \
        {'name': 'Version', 'description': 'Application version', 'value': {'Version': [app_version]}}, \
        {'name': 'Run', 'description': 'Run time', 'value': {'Run': [app_time]}}, \
        {'name': 'Start', 'description': 'Start time', 'value': {'Start': [app_start + '000']}}, \
        {'name': 'Finish', 'description': 'Finish time', 'value': {'Finish': [app_end + '000']}}, \
        {'name': 'CPU', 'description': 'Relative CPU usage during benchmark', 'value': {'CPU': [str(cpu)]}}, \
        {'name': 'HDFS', 'description': 'Relative HDFS usage during benchmark', 'value': {'HDFS': [str(hdfs)]}}, \
        {'name': 'Network', 'description': 'Relative Network usage during benchmark', 'value': {'Network': [str(network)]}} \
        ]
    app_properties = update_metadata(user, password, nav_host, nav_port, app_namespace, 'Benchmark', properties, app_report_only)
    # Fixed-width row format for the version-comparison section of the report.
    app_table_comparison = '{:<15} |{:>15} |{:>15} |{:>15} |{:>15} |{:>15} |{:>15}|'
    app_table = [['Application', app_name + '-' + app_version]]
    if not app_report_only:
        app_table.append(['Run', app_time + 's (' + str((int(app_time) / 60)) + 'm)'])
        app_table.append(
            ['Start', datetime.datetime.fromtimestamp(float(app_start)).strftime('%Y-%m-%d %H:%M:%S') + ' (' + app_start + '000)'])
        app_table.append(
            ['Finish', datetime.datetime.fromtimestamp(float(app_end)).strftime('%Y-%m-%d %H:%M:%S') + ' (' + app_end + '000)'])
    if app_properties['database']:
        app_table.append(['Metadata', 'http://localhost:7187/?view=detailsView&id=' + app_properties['database']])
    # NOTE(review): 'dashbaord' is a pre-existing misspelling of a local name;
    # kept as-is since this edit is documentation-only.
    app_dashbaord_uri = 'http://localhost:7180/cmf/views/view?viewName=' + urllib.quote_plus(dashboard_name)
    if app_report_only:
        app_table.append(['Dashboard', app_dashbaord_uri])
    else:
        # Deep-link the dashboard to the exact benchmark time window.
        app_table.append(['Dashboard', app_dashbaord_uri + '#startTime=' + app_start + '000&endTime=' + app_end + '000'])
    app_table.append(['Comparison', app_table_comparison.format('Version', 'Start', 'Finish', 'Run', 'CPU', 'HDFS', 'Network')])
    # One comparison row per previously recorded run returned by update_metadata.
    for properties_value in app_properties['properties']:
        app_table.append([None, app_table_comparison.format(', '.join(properties_value['Version']), ', '.join(properties_value['Start']),
                                                            ', '.join(properties_value['Finish']), ', '.join(properties_value['Run']),
                                                            ', '.join(properties_value['CPU']), ', '.join(properties_value['HDFS']),
                                                            ', '.join(properties_value['Network']))])
    print tabulate(app_table, tablefmt='grid')
示例3: print
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import query_timeseries [as 别名]
# for m in metrics:
#     print "%s (%s)" % (m.name, m.unit)
# Query chart (time-series) information.
import time
import datetime

# Query window: the last 180 seconds up to "now" (naive local timestamps).
from_time = datetime.datetime.fromtimestamp(time.time() - 180)
to_time = datetime.datetime.fromtimestamp(time.time())
# Service-level HDFS metrics; defined but NOT executed — query1 below is run.
query = "select files_total, dfs_capacity_used " \
        "where serviceName = HDFS " \
        " and category = SERVICE"
# Host memory metrics for one specific entity (host UUID hard-coded here).
query1 = 'select swap_used, physical_memory_used, physical_memory_total, physical_memory_cached, physical_memory_buffers where entityName="bd8a6ef9-808a-49db-822a-4ce8146ad315"'
# `api` is an ApiResource created earlier in the original script (not shown).
result = api.query_timeseries(query1, from_time, to_time)
ts_list = result[0]
# Print every (entity, metric) series and its data points. Python 2 prints.
for ts in ts_list.timeSeries:
    print "--- %s: %s ---" % (ts.metadata.entityName, ts.metadata.metricName)
    for point in ts.data:
        print "%s:\t%s" % (point.timestamp.isoformat(), point.value)
print("=============================================")
# for h in api.get_all_hosts():
#     for i in h.get_metrics():
#         print(i.context)
#
#     print(h.healthSummary)
#     print(dir(h))
#     print(dir(i))
示例4: main
# 需要导入模块: from cm_api.api_client import ApiResource [as 别名]
# 或者: from cm_api.api_client.ApiResource import query_timeseries [as 别名]
def main():
global ec2con
global cwcon
ec2con = boto.ec2.connect_to_region('us-east-1')
cwcon = boto.ec2.cloudwatch.CloudWatchConnection()
api = ApiResource(CM_HOST, username="admin", password="admin")
displayName = None
for c in api.get_all_clusters():
displayName = c.displayName
print "Cluster: %s (%s)" % (displayName, c.name)
inst_cache = {}
insts = api.get_all_hosts('full')
print "Found %s in the cluster" % [inst.hostId for inst in insts.objects]
for inst in insts.objects:
clusterName = inst.roleRefs[0].clusterName
if clusterName <> c.name:
print 'Clusters do not correspond: %s vs %s' % (clusterName, c.name)
continue
cores = inst.numCores
inst_id = inst.hostId
inst_cache[inst_id] = my_cache = {}
# For later - we'll send in one data point for every TS query
# that has AWS data
my_cache['aws_info_recorded'] = False
# my_cache['healthSummary'] = inst.healthSummary
ress = ec2con.get_all_reservations(filters={'instance-id' : inst_id})
if len(ress) > 0:
print "Found %s reservations for %s: %s" % (len(ress), inst_id, ress)
res = ress[0]
instances = res.instances
if len(instances) > 1:
print "Found %s instances for %s %s" % (len(instances), inst_id, instances)
inst = instances[0]
if inst.id <> inst_id:
raise Exception("%s != %s" % (inst.id, inst_id))
platform = inst.platform
vpc_id = inst.vpc_id
if platform == 'windows':
product = 'Windows'
elif not platform:
product = 'Linux_UNIX'
else:
product = 'UNKNOWN'
if vpc_id:
product += "_Amazon_VPC"
ami = inst.image_id
my_cache['product'] = product
my_cache['region'] = inst.region.name
my_cache['zone'] = inst.placement
inst_type = inst.instance_type.replace('.','_')
my_cache['inst_type'] = inst_type
time_f = arrow.utcnow().replace(minutes=common.DEFAULT_LOOKBACK_MINUTES)
time_t = arrow.utcnow()
# TODO
# http://arr.gr/blog/2013/08/monitoring-ec2-instance-memory-usage-with-cloudwatch/
# http://blog.sciencelogic.com/netflix-steals-time-in-the-cloud-and-from-users/03/2011
# https://www.stackdriver.com/cpu-steal-why-aws-cloudwatch-metrics-are-different-than-agent-metrics/
stat = cwcon.get_metric_statistics(300,
time_f,
time_t,
'CPUUtilization',
'AWS/EC2',
['Average','Minimum','Maximum'],
{ 'InstanceId' : inst_id })
# [{u'Timestamp': datetime.datetime(2014, 4, 13, 6, 5), u'Average': 0.35250000000000004, u'Minimum': 0.33, u'Maximum': 0.42, u'Unit': u'Percent'}]
print 'Fetching stats for %s: %s' % (inst_id, stat)
if stat:
for s in stat:
ts = common.ts_from_aws(s)
my_cache['avg_cpu'] = float(s['Average'])
else:
print "No stats found for %s" % inst_id
print "Querying CDH."
series = api.query_timeseries('SELECT * WHERE clusterName = %s' % c.name)
for entry in series.objects[0].timeSeries:
# print entry.metadata.__dict__
metric = entry.metadata.metricName
# internal host
hostname = ""
if 'hostname' in entry.metadata.attributes:
host = entry.metadata.attributes['hostname']
inst_id = ""
my_cache = {}
if 'hostId' in entry.metadata.attributes:
#.........这里部分代码省略.........