This article collects typical usage examples of the DataDB.getDataConnection method in Python. If you are wondering how DataDB.getDataConnection is used, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of DataDB, the class this method belongs to.
The following shows 15 code examples of the DataDB.getDataConnection method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
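Most of the examples below follow the same basic pattern: obtain a connection to the monitoring database with DataDB.getDataConnection(), open a psycopg2 DictCursor so rows can be addressed by column name, execute a query against the monitor_data schema, and close the connection. A minimal sketch of that pattern follows; fetch_rows and its arguments are illustrative helpers, not part of the DataDB module.

import DataDB
import psycopg2.extras

def fetch_rows(sql, params=None):
    # hypothetical helper showing the shared pattern of the examples below
    conn = DataDB.getDataConnection()
    try:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute(sql, params)
        return [dict(r) for r in cur]
    finally:
        conn.close()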
Example 1: getTop10Interval
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getTop10Interval(order=avgRuntimeOrder, interval=None, hostId=1, limit=10):
    sql = """select regexp_replace("name", E'(\\\\(.*\\\\))','()') AS "name",
                    round( sum(d_calls) , 0 ) AS "calls",
                    round( sum(d_total_time) , 0 ) AS "totalTime",
                    round( sum(d_total_time) / sum(d_calls) , 0 ) AS "avgTime"
               from ( """ + getSQL(interval, hostId) + """) tt
              where d_calls > 0
              group by "name"
              order by """ + order + """ limit """ + str(adapt(limit))

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(sql)

    sprocs = []
    for record in cur:
        record['avgTime'] = makeTimeIntervalReadable(record['avgTime'])
        record['totalTime'] = makeTimeIntervalReadable(record['totalTime'])
        sprocs.append(record)

    conn.close()
    return sprocs
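For reference, a call to this function might look like the sketch below. avgRuntimeOrder is a module-level ordering constant referenced in the signature, and the interval string format is inferred from Example 14, so treat both as assumptions rather than documented API.

# hypothetical call: top 10 stored procedures on host 1, last 9 days, ordered by average runtime
top_sprocs = getTop10Interval(order=avgRuntimeOrder,
                              interval="('now'::timestamp - '9 days'::interval)",
                              hostId=1,
                              limit=10)
for sproc in top_sprocs:
    print(sproc['name'], sproc['calls'], sproc['totalTime'], sproc['avgTime'])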
Example 2: getSprocDataByTags
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getSprocDataByTags():
    sql = """select tm_tag_id , sum("yaxis") AS "yaxis_t" , sum("yaxis2") AS "yaxis_c", "xaxis" from (
                select group_date(sp_timestamp,30) as "xaxis",
                       sp_sproc_id,
                       max(sp_self_time) - min(sp_self_time) as "yaxis",
                       max(sp_calls) - min(sp_calls) as "yaxis2"
                  from monitor_data.sproc_performance_data
                 where sp_timestamp > 'now'::timestamp - '9 days'::interval
                 group by sp_sproc_id , group_date(sp_timestamp,30) ) data,
              monitor_data.sprocs,
              monitor_data.tag_members
              where sprocs.sproc_id = sp_sproc_id
                and tm_sproc_name = sproc_name
                and tm_schema = get_noversion_name(sproc_schema)
              group by tm_tag_id , "xaxis" order by 4 asc;"""

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(sql)

    data = collections.defaultdict(list)
    for r in cur:
        data[r['tm_tag_id']].append((r['xaxis'], r['yaxis_t'], r['yaxis_c']))

    cur.close()
    DataDB.closeDataConnection(conn)
    return data
Example 3: getLoadReportData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getLoadReportData():
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select load_host_id AS id,
                          extract(week from load_timestamp)::text AS kw,
                          round(avg(load_1min_value)/100,2) AS avg,
                          round(max(load_1min_value)/100,2) AS max,
                          to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                          to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                          min(load_timestamp::date) AS sort_date
                     from monitor_data.host_load , monitor_data.hosts
                    where host_id = load_host_id
                      and host_enabled
                      and load_timestamp > ('now'::timestamp - '9 weeks'::interval)
                      and extract(dow from load_timestamp) IN (1,2,3,4,5)
                    group by load_host_id, extract(week from load_timestamp)
                    order by 1 ASC, 7 DESC""")

    data = defaultdict(list)
    lastRR = None

    for r in cur:
        rr = {'id': r['id'],
              'avg': r['avg'],
              'max': r['max'],
              'min_date': r['min_date'],
              'max_date': r['max_date'],
              'trendAvg': 0,
              'trendMax': 0,
              'kw': r['kw']}

        # derive trend flags by comparing consecutive weeks of the same host
        if lastRR is not None and lastRR['id'] == rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(data.values(), key=lambda x: hosts.hosts[x[0]['id']]['settings']['uiShortName'])
Example 4: load_filter_lines
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def load_filter_lines(host_id, _filter=None, interval=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(get_filted_query(host_id, _filter, interval))
    l = []
    for row in cur:
        l.append((row['xaxis'], row['yaxis']))
    return l
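Unlike most examples on this page, this one does not close the cursor or the connection. A defensive variant of the same query is sketched below using contextlib.closing, so the connection is released even if the query raises; load_filter_lines_safe is a hypothetical name, not part of the original module.

import contextlib

import DataDB
import psycopg2.extras

def load_filter_lines_safe(host_id, _filter=None, interval=None):
    # same query as above, but cursor and connection are always closed
    with contextlib.closing(DataDB.getDataConnection()) as conn:
        with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(get_filted_query(host_id, _filter, interval))
            return [(row['xaxis'], row['yaxis']) for row in cur]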
Example 5: getTableData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getTableData(host, name, interval=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(getSingleTableSql(host, name, interval))

    d = {'table_size': [], 'index_size': [], 'seq_scans': [], 'index_scans': [],
         'ins': [], 'upd': [], 'del': [], 'hot': []}

    last_is = None
    last_ss = None
    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d['table_size'].append((r['tsd_timestamp'], r['tsd_table_size']))
        d['index_size'].append((r['tsd_timestamp'], r['tsd_index_size']))

        # counters are cumulative; only emit deltas when the previous sample is at most 15 minutes old
        if int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000) - last_timestamp <= (15 * 60 * 1000):
            if last_ss is not None:
                d['seq_scans'].append((r['tsd_timestamp'], r['tsd_seq_scans'] - last_ss))
            if last_is is not None:
                d['index_scans'].append((r['tsd_timestamp'], r['tsd_index_scans'] - last_is))
            if last_ins is not None and last_ins != 0:
                d['ins'].append((r['tsd_timestamp'], r['tsd_tup_ins'] - last_ins))
            if last_del is not None and last_del != 0:
                d['del'].append((r['tsd_timestamp'], r['tsd_tup_del'] - last_del))
            if last_upd is not None and last_upd != 0:
                d['upd'].append((r['tsd_timestamp'], r['tsd_tup_upd'] - last_upd))
            if last_hot is not None and last_hot != 0:
                d['hot'].append((r['tsd_timestamp'], r['tsd_tup_hot_upd'] - last_hot))

        last_is = r['tsd_index_scans']
        last_ss = r['tsd_seq_scans']
        last_ins = r['tsd_tup_ins']
        last_del = r['tsd_tup_del']
        last_upd = r['tsd_tup_upd']
        last_hot = r['tsd_tup_hot_upd']
        last_timestamp = int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)
    return d
Example 6: getGroupsData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getGroupsData():
    conn = DataDB.getDataConnection()
    groups = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("SELECT * FROM monitor_data.host_groups;")

    for g in cur:
        groups[g['group_id']] = g['group_name']

    cur.close()
    conn.close()
    return groups
Example 7: getHostData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getHostData():
    conn = DataDB.getDataConnection()
    hosts = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("SELECT * FROM monitor_data.hosts WHERE host_enabled = true ORDER BY host_id ASC;")

    for r in cur:
        rr = dict(r)
        rr['settings'] = json.loads(rr['host_settings'])
        hosts[rr['host_id']] = rr

    cur.close()
    conn.close()
    return hosts
Example 8: getHostData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getHostData():
    conn = DataDB.getDataConnection()
    hosts = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("SELECT * FROM monitor_data.hosts WHERE host_enabled = true ORDER BY host_id ASC;")

    for r in cur:
        rr = dict(r)
        rr['settings'] = json.loads(rr['host_settings'])
        rr['uishortname'] = rr['settings']['uiShortName'].lower().replace('-', '')
        rr['uilongname'] = rr['settings']['uiLongName']
        hosts[rr['host_id']] = rr

    cur.close()
    conn.close()
    return hosts
Example 9: getSingleSprocData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getSingleSprocData(name, hostId=1, interval=None, sprocNr=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(getSingleSprocSQL(name, hostId, interval, sprocNr))

    data = {'calls': [], 'self_time': [], 'total_time': [], 'avg_time': [], 'avg_self_time': [], 'name': name}

    for r in cur:
        data['calls'].append((r['xaxis'], r['d_calls']))
        data['total_time'].append((r['xaxis'], r['d_total_time']))
        data['self_time'].append((r['xaxis'], r['d_self_time']))
        data['avg_time'].append((r['xaxis'], r['d_avg_time']))
        data['avg_self_time'].append((r['xaxis'], r['d_avg_self_time']))

    cur.close()
    DataDB.closeDataConnection(conn)
    return data
Example 10: getCpuLoad
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getCpuLoad(hostId=1):
    load = {"load_15min_avg": [], "load_15min_max": []}

    sql = """SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                    AVG(load_1min_value) AS load_15min_avg,
                    MAX(load_1min_value) AS load_15min_max
               FROM monitor_data.host_load
              WHERE load_host_id = """ + str(adapt(hostId)) + """
                AND load_timestamp > ('now'::timestamp - '9 days'::interval)
              GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
              ORDER BY 1 ASC"""

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(sql)

    for record in cur:
        load['load_15min_avg'].append((record['load_timestamp'], round(float(record['load_15min_avg']) / 100, 2)))
        load['load_15min_max'].append((record['load_timestamp'], round(float(record['load_15min_max']) / 100, 2)))

    return load
Example 11: getWalVolumes
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getWalVolumes(hostId=1):
    load = {"wal_15min_growth": []}

    sql = """SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                    coalesce(max(xlog_location_mb)-min(xlog_location_mb),0) AS wal_15min_growth
               FROM monitor_data.host_load
              WHERE load_host_id = """ + str(adapt(hostId)) + """
                AND load_timestamp > ('now'::timestamp - '9 days'::interval)
              GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
              ORDER BY 1 ASC"""

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(sql)

    for record in cur:
        load['wal_15min_growth'].append((record['load_timestamp'], record['wal_15min_growth']))

    return load
Example 12: getSprocsOrderedBy
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getSprocsOrderedBy(hostId, order=" ORDER BY SUM(delta_total_time) DESC"):
    sql = """SELECT sproc_name
               FROM ( """ + viewSprocs() + """ ) t JOIN monitor_data.sprocs ON sp_sproc_id = sproc_id
              WHERE sproc_host_id = """ + str(hostId) + """
              GROUP BY sproc_name
              """ + order + """;
          """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    names = []
    cur.execute(sql)
    for r in cur:
        names.append(r['sproc_name'])

    cur.close()
    DataDB.closeDataConnection(conn)
    return names
Example 13: getTableIOData
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getTableIOData(host, name, interval=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(getSingleTableIOSql(host, name, interval))

    d = {'heap_read': [], 'heap_hit': [], 'index_read': [], 'index_hit': []}

    last_hr = None
    last_hh = None
    last_ir = None
    last_ih = None
    last_timestamp = 0

    for r in cur:
        # counters are cumulative; only emit deltas when the previous sample is at most 15 minutes old
        if int(time.mktime(r['tio_timestamp'].timetuple()) * 1000) - last_timestamp <= (15 * 60 * 1000):
            if last_hr is not None:
                d['heap_read'].append((r['tio_timestamp'], r['tio_heap_read'] - last_hr))
            if last_hh is not None:
                d['heap_hit'].append((r['tio_timestamp'], r['tio_heap_hit'] - last_hh))
            if last_ir is not None:
                d['index_read'].append((r['tio_timestamp'], r['tio_idx_read'] - last_ir))
            if last_ih is not None:
                d['index_hit'].append((r['tio_timestamp'], r['tio_idx_hit'] - last_ih))

        last_hr = r['tio_heap_read']
        last_hh = r['tio_heap_hit']
        last_ir = r['tio_idx_read']
        last_ih = r['tio_idx_hit']
        last_timestamp = int(time.mktime(r['tio_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)
    return d
Example 14: getLoad
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getLoad(hostId=1):
    sql = """select xaxis,
                    sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000) AS load_15min,
                    sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) / (4*15*60*1000) AS load_1hour
               from ( select xaxis, sum(d_self_time) d_self_time
                        from (""" + getSQL("('now'::timestamp - '9 days'::interval)", hostId) + """) dataTabel
                       group by xaxis ) loadTable"""

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    load = {'load_15min': [], 'load_1hour': []}

    print(sql)

    cur.execute(sql)

    lastTime = None
    skip15min = 0
    skip1h = 0

    for record in cur:
        currentTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)

        # after a gap of more than 15 minutes in the data, skip a few points
        # so the windowed sums are not reported across the hole
        if lastTime is not None:
            if currentTime - lastTime > (15 * 60 * 1000):
                skip15min = 2
                skip1h = 5

        if skip15min > 0:
            skip15min -= 1
        else:
            load['load_15min'].append((record['xaxis'], round(record['load_15min'], 2)))

        if skip1h > 0:
            skip1h -= 1
        else:
            load['load_1hour'].append((record['xaxis'], round(record['load_1hour'], 2)))

        lastTime = currentTime

    cur.close()
    conn.close()
    return load
Example 15: getTablePerformanceIssues
# Required module: import DataDB [as alias]
# Or: from DataDB import getDataConnection [as alias]
def getTablePerformanceIssues(hostname, date_from, date_to):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select * from monitor_data.get_table_threshold_sinners_for_period(%s,%s,%s)""",
                (hostname, date_from, date_to))

    data = []  # cur.fetchall()
    for r in cur:
        row = {'host_name': r['host_name'],
               'host_id': r['host_id'],
               'schema_name': r['schema_name'],
               'table_name': r['table_name'],
               'day': r['day'],
               'scan_change_pct': r['scan_change_pct'],
               'scans1': r['scans1'],
               'scans2': r['scans2'],
               'size1': r['size1'],
               'size2': r['size2'],
               'size_change_pct': r['size_change_pct'],
               'allowed_seq_scan_pct': r['allowed_seq_scan_pct']}
        data.append(row)

    cur.close()
    conn.close()
    return data