This article collects typical usage examples of the Python method msg_db_connector.MSGDBConnector.commit. If you are wondering what exactly Python MSGDBConnector.commit does, how to use it, or what real calls look like, the curated code examples below may help. You can also explore further usage examples of the class msg_db_connector.MSGDBConnector that this method belongs to.
The following shows 3 code examples of the MSGDBConnector.commit method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
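All three examples share the same basic pattern: open a connection through MSGDBConnector().connectDB(), execute SQL through a cursor, and call commit() on the connection to make the changes permanent. The minimal sketch below illustrates only that pattern; it assumes connectDB() returns a DB-API style connection (cursor(), commit()), and the SELECT statement is a placeholder rather than part of the Maui Smart Grid schema.

# Minimal sketch of the pattern shared by the examples below.
# Assumes connectDB() returns a DB-API style connection; 'SELECT 1' is a placeholder.
from msg_db_connector import MSGDBConnector

conn = MSGDBConnector().connectDB()
cursor = conn.cursor()
cursor.execute('SELECT 1')   # any statements issued through the cursor...
conn.commit()                # ...are made permanent by commit()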
Example 1: test_log_successful_export
# Required import: from msg_db_connector import MSGDBConnector [as alias]
# Or: from msg_db_connector.MSGDBConnector import commit [as alias]
def test_log_successful_export(self):
    """
    Test logging of export results to the export history table.
    """
    # @REVIEWED
    self.assertTrue(self.exporter.logSuccessfulExport(name = 'test_export',
                                                      url = 'http://test_url',
                                                      datetime = 0,
                                                      size = 100))
    conn = MSGDBConnector().connectDB()
    cursor = conn.cursor()
    dbUtil = MSGDBUtil()
    self.assertTrue(
        dbUtil.executeSQL(cursor, 'select * from "ExportHistory" where '
                                  'timestamp = to_timestamp(0)'))
    self.assertEqual(len(cursor.fetchall()), 1,
                     "There should only be one result row.")
    self.assertTrue(
        dbUtil.executeSQL(cursor, 'delete from "ExportHistory" where '
                                  'timestamp = to_timestamp(0)'))
    conn.commit()
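A small note on the pattern above: the verification query and the cleanup DELETE share one connection, and the final conn.commit() is what makes the DELETE permanent. If an assertion can raise before that point, a try/finally variant keeps the table clean. The following is a hedged sketch of that variant, not part of the original test; MSGDBConnector and MSGDBUtil are assumed to be imported as in the original test module.

# Hedged sketch: commit the cleanup DELETE even if the check fails.
# MSGDBConnector and MSGDBUtil are assumed to be imported as in the test above.
conn = MSGDBConnector().connectDB()
cursor = conn.cursor()
dbUtil = MSGDBUtil()
try:
    dbUtil.executeSQL(cursor, 'select * from "ExportHistory" where '
                              'timestamp = to_timestamp(0)')
    assert len(cursor.fetchall()) == 1   # exactly one logged export expected
finally:
    dbUtil.executeSQL(cursor, 'delete from "ExportHistory" where '
                              'timestamp = to_timestamp(0)')
    conn.commit()                        # persist the cleanup regardless of the outcome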
Example 2: logSuccessfulExport
# Required import: from msg_db_connector import MSGDBConnector [as alias]
# Or: from msg_db_connector.MSGDBConnector import commit [as alias]
def logSuccessfulExport(self, name = '', url = '', datetime = 0, size = 0):
    """
    When an export has been successful, log information about the export
    to the database.

    The items to log include:

    * filename
    * URL
    * timestamp
    * filesize

    :param name: String
    :param url: String
    :param datetime: Timestamp string, or 0 for the Unix epoch.
    :param size: Int
    :return: True if no errors occurred, else False.
    """

    def exportHistoryColumns():
        return ['name', 'url', 'timestamp', 'size']

    timestamp = lambda datetime: 'to_timestamp(0)' if datetime == 0 \
        else "timestamp '{}'".format(datetime)

    sql = 'INSERT INTO "{0}" ({1}) VALUES ({2}, {3}, {4}, {5})'.format(
        self.configer.configOptionValue('Export', 'export_history_table'),
        ','.join(exportHistoryColumns()), "'" + name + "'", "'" + url + "'",
        timestamp(datetime), size)

    conn = MSGDBConnector().connectDB()
    cursor = conn.cursor()
    dbUtil = MSGDBUtil()
    result = dbUtil.executeSQL(cursor, sql, exitOnFail = False)
    conn.commit()
    return result
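As a design note, the INSERT in this example is assembled purely through Python string formatting. If the connection returned by connectDB() is a standard DB-API connection such as psycopg2 (suggested, but not confirmed, by the cursor()/fetchall() usage on this page), the same statement can be expressed with parameter substitution, which avoids manual quoting. Calling cursor.execute directly instead of dbUtil.executeSQL, and passing the timestamp as a Python datetime, are assumptions made only for this sketch.

# Hedged sketch: parameterized form of the INSERT above, for the datetime == 0 case.
# Assumes a psycopg2-style connection; self, name, url, size, cursor and conn are
# those defined in logSuccessfulExport above.
from datetime import datetime as dt

table = self.configer.configOptionValue('Export', 'export_history_table')
sql = ('INSERT INTO "{}" (name, url, timestamp, size) '
       'VALUES (%s, %s, %s, %s)').format(table)
cursor.execute(sql, (name, url, dt.utcfromtimestamp(0), size))
conn.commit()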
Example 3: MSGDataAggregator
# Required import: from msg_db_connector import MSGDBConnector [as alias]
# Or: from msg_db_connector.MSGDBConnector import commit [as alias]
class MSGDataAggregator(object):
    """
    Use for continuous data aggregation of diverse data types relevant to the
    Maui Smart Grid project.

    Four data types are supported:

    1. Irradiance
    2. Temperature/Humidity (weather)
    3. Circuit
    4. eGauge

    The general data form conforms to

    1. timestamp, subkey_id, val1, val2, val3, ...
    2. timestamp, val1, val2, val3, ...

    Case (2) is handled within the same space as (1) by testing for the
    existence of subkeys.

    Current aggregation consists of averaging over **15-min intervals**.

    Aggregation is performed in-memory and saved to the DB. The time range is
    delimited by a start date and an end date, both of which are included in
    the range. The timestamp of each aggregation interval is the last
    timestamp in the respective series.

    * Aggregation subkeys are values such as eGauge IDs or circuit numbers.

    Aggregation is implemented externally for performance and flexibility
    advantages over alternative approaches such as creating a view. It may be
    rolled into an internal function at a future time if that proves to be
    beneficial.

    Usage:

        from msg_data_aggregator import MSGDataAggregator
        aggregator = MSGDataAggregator()

    API:

        aggregateAllData(dataType = dataType)
        aggregateNewData(dataType = dataType)
    """
    def __init__(self, exitOnError=True, commitOnEveryInsert=False, testing=False):
        """
        Constructor.

        :param testing: if True, the testing DB will be connected to instead of
        the production DB.
        """
        self.logger = SEKLogger(__name__, "info")
        self.configer = MSGConfiger()
        self.conn = MSGDBConnector().connectDB()
        self.cursor = self.conn.cursor()
        self.dbUtil = MSGDBUtil()
        self.notifier = MSGNotifier()
        self.mathUtil = MSGMathUtil()
        self.timeUtil = MSGTimeUtil()
        self.nextMinuteCrossing = {}
        self.nextMinuteCrossingWithoutSubkeys = None
        self.exitOnError = exitOnError
        self.commitOnEveryInsert = commitOnEveryInsert

        section = "Aggregation"
        tableList = [
            "irradiance",
            "agg_irradiance",
            "weather",
            "agg_weather",
            "circuit",
            "agg_circuit",
            "egauge",
            "agg_egauge",
        ]
        self.dataParams = {
            "weather": ("agg_weather", "timestamp", ""),
            "egauge": ("agg_egauge", "datetime", "egauge_id"),
            "circuit": ("agg_circuit", "timestamp", "circuit"),
            "irradiance": ("agg_irradiance", "timestamp", "sensor_id"),
        }
        self.columns = {}

        # tables[datatype] gives the table name for datatype.
        self.tables = {
            t: self.configer.configOptionValue(section, "{}_table".format(t))
            for t in tableList
        }

        for t in self.tables.keys():
            self.logger.log("t:{}".format(t), "DEBUG")
            try:
                self.columns[t] = self.dbUtil.columnsString(self.cursor,
                                                            self.tables[t])
            except TypeError as error:
                self.logger.log(
                    "Ignoring missing table: Error is {}.".format(error),
                    "error")
    def existingIntervals(self, aggDataType="", timeColumnName=""):
        """
        Retrieve the existing aggregation intervals for the given data type.
#......... part of the code is omitted here .........
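Putting the Usage and API sections of the class docstring together gives a calling pattern along the following lines. This is a minimal sketch only; the 'egauge' data type is an assumption drawn from the dataParams keys in __init__, and testing=True simply selects the testing DB as described in the constructor docstring.

# Minimal usage sketch based on the Usage/API sections of the docstring above.
# The 'egauge' data type is an assumption taken from the dataParams keys.
from msg_data_aggregator import MSGDataAggregator

aggregator = MSGDataAggregator(testing=True)    # connect to the testing DB
aggregator.aggregateNewData(dataType='egauge')  # aggregate data not yet processed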