本文整理匯總了Python中virtualisation.misc.log.Log.i方法的典型用法代碼示例。如果您正苦於以下問題:Python Log.i方法的具體用法?Python Log.i怎麽用?Python Log.i使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類virtualisation.misc.log.Log
的用法示例。
在下文中一共展示了Log.i方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def __init__(self, gdi_config, rm):
    """Open a PostgreSQL/PostGIS connection and create the schema, tables
    and indexes used to store sensor streams and observations.

    :param gdi_config: config object with host, database, username,
        password and port attributes
    :param rm: resource management instance, kept for later use
    """
    self.rm = rm
    self.PAGINATION_LIMIT = 100  # page size for paginated queries
    connect_str = "host='%s' dbname='%s' user='%s' password='%s' port=%d" % (
        gdi_config.host, gdi_config.database, gdi_config.username, gdi_config.password, gdi_config.port)
    self.conn = psycopg2.connect(connect_str)
    self.curs = self.conn.cursor()
    try:
        self.curs.execute("CREATE SCHEMA IF NOT EXISTS %s;" % SQL.SCHEMA)
        # NOTE(review): the sensors table is deliberately created in "public"
        # (the foreign key below references it there); the column name
        # "sercvice_category" is kept as-is since existing data/queries may
        # depend on the typo.
        self.curs.execute("CREATE TABLE IF NOT EXISTS %s.cp_sensors (sensor_uuid UUID CONSTRAINT uuid_key PRIMARY KEY, sensor_annotation_id VARCHAR, sercvice_category VARCHAR, traffic INTEGER, geom GEOMETRY(GEOMETRY, 4326) );" % ("public",))
        cols = ["sampling_time TIMESTAMP", "sensor_uuid UUID", "observation_uuid UUID", "data JSON", "quality JSON"]
        query = 'CREATE TABLE IF NOT EXISTS %s.cp_observations ( %s, PRIMARY KEY (%s), FOREIGN KEY (sensor_uuid) REFERENCES %s.cp_sensors(sensor_uuid));\n' % (SQL.SCHEMA, ', '.join(cols), ", ".join(["observation_uuid"]), "public")
        self.curs.execute(query)
        # Index over sampling_time and sensor_uuid.
        # 'CREATE INDEX IF NOT EXISTS' is not available to us (version < 9.5),
        # so an "index already exists" error is expected and tolerated.
        # BUG FIX: commit the work done so far first — a failing CREATE INDEX
        # aborts the whole transaction, which previously made every following
        # statement (and the final commit) fail as well. The index attempt now
        # runs in its own transaction and is rolled back on failure.
        self.conn.commit()
        try:
            query = 'CREATE INDEX "timeindex" ON %s.cp_observations USING btree (sampling_time);' % (SQL.SCHEMA,)
            self.curs.execute(query)
            query = 'CREATE INDEX uuidindex ON %s.cp_observations USING btree (sensor_uuid);' % (SQL.SCHEMA,)
            self.curs.execute(query)
        except Exception:
            # best effort: indexes most likely exist already
            self.conn.rollback()
        # map between primary and secondary observation uuids
        query = 'CREATE TABLE IF NOT EXISTS %s.p_s_observation_uuid (main UUID, secondary UUID);' % (SQL.SCHEMA,)
        self.curs.execute(query)
        self.conn.commit()
        L.i("SQL: schema/tables created")
    except Exception as e:
        L.e("SQL: Could not create schema/tables", e)
        self.conn.rollback()
示例2: start
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def start(self, restart=False):
    """Switch the system to live mode: start the queues, install a fresh
    real-time clock, (re)start all wrappers and block until the user ends
    the run by pressing Enter.

    :param restart: forwarded to startWrapper for each wrapper
    """
    self._startQueues()
    # replace any previously running clock with a fresh real-time one
    if self.clock:
        self.clock.stop()
    self.clock = RealClock(self.end)
    perf_enabled = ResourceManagement.args.pt
    perf_meter = None
    if perf_enabled:
        from virtualisation.resourcemanagement.performancetestreceiver import PerformanceMeterMinutes
        perf_meter = PerformanceMeterMinutes()
    for wrapper in self.wrappers:
        self.startWrapper(wrapper, restart)
        if perf_enabled:
            wrapper.addReceiver(perf_meter)
    L.i(datetime.datetime.now())
    if not self.args.noQuality:
        # reuse the existing quality tracker if there is one
        if self.averageStreamQuality:
            self.averageStreamQuality.setClock(self.clock)
        else:
            self.averageStreamQuality = AverageStreamQuality(self, self.clock)
    self.clock.runAsync()
    raw_input("press Enter to end.\n")
    self.clock.stop()
示例3: deploy
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def deploy(self, f, autostart=False):
    """
    Deploy one or more wrappers from a zip file.

    The zip must contain a "deploy.json" describing either a single
    wrapper (module/class pair) or a list of them.

    :param f: path to the wrapper zip file
    :param autostart: start each wrapper right after adding it
    :return: a tuple with 3 elements. 1. status as string ("OK"/"Fail"),
        2. error message as string, 3. list of sensor descriptions of the
        added wrappers (NOT uuids — the original docstring was wrong)
    """
    L.i("Deploying", f)
    sensordescriptions = []
    try:
        zFile = zipfile.ZipFile(f)
        if "deploy.json" in zFile.namelist():
            deployDescription = JOb(zFile.open("deploy.json", "r"))
            # make the zip importable so the wrapper modules can be loaded
            sys.path.insert(0, f)
            # normalise to a list so single- and multi-wrapper deployments
            # share one code path (the two branches were duplicated before)
            descriptions = deployDescription if deployDescription.isList() else [deployDescription]
            for dd in descriptions:
                module = __import__(dd.module)
                wrapper = getattr(module, dd["class"])()
                self.addWrapper(wrapper)
                sensordescriptions.append(wrapper.getSensorDescription())
                if autostart:
                    self.startWrapper(wrapper)
        return "OK", "", sensordescriptions
    except Exception as e:
        L.w("Deployment of wrapper", f, "failed.", e.message)
        return "Fail", e.message, []
示例4: __run
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def __run(self):
    """Worker loop: every ``self.delay`` seconds log the message counter
    plus the triple-store buffer size, then reset the counter. Runs until
    ``self.stop`` becomes true."""
    while not self.stop:
        sleep(self.delay)
        Log.i(self.counter, self.txt)
        Log.i("ThreadedTriplestoreAdapter Buffer Size:", ThreadedTriplestoreAdapter.getTotalBufferSize())
        # start counting afresh for the next interval
        self.counter = 0
示例5: setTimeframe
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def setTimeframe(self, startdate, enddate):
    """Restrict replay to [startdate, enddate]; for non-list (streamed)
    data sources, fast-forward to the first entry at or after startdate."""
    streamed = not isinstance(self.data, list)
    if streamed:
        sensor_id = self.wrapper.getSensorDescription().sensorID
        Log.i("Searching start date in historic data for", sensor_id, "...")
        found = self.data.scrollTo(startdate)
        if found:
            Log.i("done")
        else:
            Log.w("no historic data beginning at", startdate, "found")
    super(CSVHistoryReader, self).setTimeframe(startdate, enddate)
示例6: setReplayMode
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def setReplayMode(self, mode):
    """Enable or disable replay mode. When enabling, open the historic
    traffic CSV for this sensor and set up reader and parser; on failure
    the reader is cleared and the error logged."""
    if mode:
        try:
            Log.i("loading history for", self.sensorDescription.sensorID, "...")
            csv_path = os.path.join("historicdata", "trafficData%d.csv" % self.sensorDescription.sensorID)
            csv_file = AbstractWrapper.getFileObject(__file__, csv_path, "rU")
            self.historyreader = CSVHistoryReader(self, csv_file)
            self.historyreader.multiple_observations = False
            Log.i("done")
            self.historyparser = CSVParser(self, self.historyreader.headers)
            # connection will be set automatically by the AbstractComposedWrapper to SplitterConnection
        except Exception as e:
            Log.e(e)
            self.historyreader = None
    super(InternalWrapper, self).setReplayMode(mode)
示例7: end
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def end(self):
    """Shut down the whole system: stop every wrapper, the optional
    queues/adapters selected by the command line flags, and the interface."""
    for wrapper in self.wrappers:
        wrapper.stop()
    flags = ResourceManagement.args
    if flags.messagebus:
        self.messageBusQueue.stop()
    if flags.aggregate:
        self.aggregationQueue.stop()
    if self.eventWrapper:
        self.eventWrapper.stop()
    self.receiverQueue.stop()
    if flags.triplestore:
        ThreadedTriplestoreAdapter.stop()
    self.stopInterface()
    L.i("REPLAY ENDED")
示例8: setReplayMode
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def setReplayMode(self, mode):
    """Enable or disable replay mode. When enabling, open the historic
    parking CSV for this sensor and set up reader and parser; on failure
    the reader is cleared and the error logged."""
    if mode:
        try:
            Log.i("loading history data for", self.sensorDescription.sensorID, "...")
            csv_path = os.path.join("historicdata", "aarhus_parking-%s.csv" % self.sensorDescription.sensorID)
            csv_file = AbstractWrapper.getFileObject(__file__, csv_path, "rU")
            self.historyreader = CSVHistoryReader(self, csv_file, timestampfield="updatetime")
            # Must preserve the order as in the CSV but use the names as in senordescription.fields
            # vehiclecount,updatetime,_id,totalspaces,garagecode,streamtime
            column_names = ["vehicleCount", "updatetime", "_id", "totalSpaces", "garageCode", "st"]
            self.historyparser = CSVParser(self, column_names, timestampfield="updatetime")
            Log.i("done")
        except Exception as e:
            Log.e(e)
            self.historyreader = None
    super(InternalWrapper, self).setReplayMode(mode)
示例9: addWrapper
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def addWrapper(self, wrapper):
    """Register a wrapper with the system: validate each of its sensor
    descriptions, register the streams with the aggregator and the GDI,
    trigger static annotation, and store the wrapper.

    :param wrapper: wrapper instance providing getSensorDescription()
        (which may return a single description or a list of them)
    """
    def _register(_sd):
        # Validate and register one sensor description. Errors are logged
        # per description so one bad stream does not block the others —
        # this replaces two verbatim-duplicated branches for the
        # list / single-description cases.
        try:
            _sd.test()
            if ResourceManagement.args.aggregate:
                self.aggregator.wrapper_added(_sd)
            if self.gdiInterface:
                self.gdiInterface.registerSensorStreamFromWKT(
                    _sd.uuid, _sd.sensorID, _sd.sensorType, _sd.location, _sd.location_epsg or 4326
                )
            L.i("added wrapper with ID", _sd.sensorID)
        except Exception as ex:
            L.e("Error deploying wrapper:", str(ex))

    # TODO: this should not be here
    if ResourceManagement.args.cleartriplestore:
        self.deleteGraphs(wrapper)
    sd = wrapper.getSensorDescription()
    try:
        # normalise single description to a one-element list
        for _sd in (sd if isinstance(sd, list) else [sd]):
            _register(_sd)
        if ResourceManagement.args.triplestore or ResourceManagement.args.messagebus:
            # StaticAnnotator.staticAnnotationSensor(wrapper, self.config, self.messageBusQueue, self.rabbitmqchannel)
            StaticAnnotator.threadedStaticAnnotationSensor(wrapper, self.config, self.messageBusQueue, self.ui.api)
        if ResourceManagement.args.messagebus:
            wrapper.setMessageBusQueue(self.messageBusQueue)
        self.wrappers.append(wrapper)
    except Exception as ex:
        L.e(self.__class__.__name__, "Error in addWrapper:", str(ex))
示例10: tick
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def tick(self):
    """Advance the clock by one tick: log the simulated time once per
    simulated day, execute all due notifications and jobs, then remove
    the expired entries under the lock.

    NOTE(review): due/expired selection works on *copies* of
    self.notifications / self.jobs, so handlers may register new entries
    while the tick runs; removal is done separately under self.lock.
    """
    # Log.d2("AbstractClock tick")
    # print the current simulated time once per simulated day
    self.dayPrintCounter -= 1
    if self.dayPrintCounter == 0:
        self.dayPrintCounter = AbstractClock.secondsInADay
        Log.i("It is", self.now())
        print "It is", self.now()
    # Log.d2("AbstractClock notification handling")
    # select the notifications that are due now and pause the clock while
    # they are being handled
    notifications = filter(self.filtermethod, self.notifications.copy().iteritems())
    self.pause(len(notifications))
    for k, v in notifications:
        v.do()
    del notifications
    # Log.d2("AbstractClock notification handling finished")
    # Log.d2("AbstractClock job")
    # jobs are either run inline or each in its own thread, depending on
    # self.execute_jobs_async
    jobs = filter(self.filtermethod, self.jobs.copy().iteritems())
    for k, v in jobs:
        if self.execute_jobs_async:
            threading.Thread(target=v.do).start()
        else:
            v.do()
    del jobs
    # Log.d2("AbstractClock job handling finished")
    # Log.d2("AbstractClock delete self.notifications/jobs")
    # remove expired notifications and jobs; mutation of the real dicts
    # happens only here, guarded by the lock
    self.lock.acquire()
    notifications = filter(self.removefiltermethod, self.notifications.copy().iteritems())
    for k, v in notifications:
        del self.notifications[k]
    jobs = filter(self.removefiltermethod, self.jobs.copy().iteritems())
    for k, v in jobs:
        # a job may already have been removed by a handler above
        if k in self.jobs:
            del self.jobs[k]
    # Log.d2("AbstractClock delete finished")
    # tmp = filter(self.removefiltermethod, self.notifications.copy().iteritems())
    # del self.notifications
    # self.notifications = tmp
    # tmp = filter(self.removefiltermethod, self.jobs.copy().iteritems())
    # del self.jobs
    # self.jobs = tmp
    self.lock.release()
示例11: start_messagebus
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def start_messagebus(self, args):
    """Connect to the RabbitMQ message bus and start the outgoing queue.

    On connection failure the messagebus feature is disabled on both
    self.args and the passed args instead of raising.

    :param args: parsed command-line arguments; its ``messagebus`` flag is
        cleared if the connection cannot be established
    """
    L.i("Connecting to the message bus")
    self.messageBusQueue = QueueThread(handler=self.sendMessageHandler)
    try:
        # prepare RabbitMQ configuration
        rmq_host = str(self.config.rabbitmq.host)
        rmq_port = self.config.rabbitmq.port
        rmq_username = self.config.rabbitmq.username if "username" in self.config.rabbitmq else None
        # BUG FIX: this previously read .username again, so the configured
        # password was silently ignored
        rmq_password = self.config.rabbitmq.password if "password" in self.config.rabbitmq else None
        if rmq_username:
            if rmq_password:
                RabbitMQ.establishConnection(rmq_host, rmq_port, rmq_username, rmq_password)
            else:
                RabbitMQ.establishConnection(rmq_host, rmq_port, rmq_username)
        else:
            RabbitMQ.establishConnection(rmq_host, rmq_port)
        L.i("Connected to the message bus")
        self.messageBusQueue.start()
    except MessageBusConnectionError:
        self.args.messagebus = False
        args.messagebus = False
        L.w("Could not connect to MessageBus server. Disabling MessageBus feature.")
示例12: startReplay
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def startReplay(self):
    """Run the system in historic replay mode: build a ReplayClock over the
    configured time frame, put every wrapper into replay mode, start the
    replay and block until it ends (or until the user presses Enter when
    not continuing live).

    Raises an exception if --start/--end are missing or unparsable.
    """
    # cherrypy.tree.mount(dowser.Root(), '/dowser')
    self._startQueues()
    method = self.replayEnd
    args = None
    start_time = datetime.datetime.now()
    # if continuelive enabled set "start" as method to set the system to live mode if historic replay is finished
    if ResourceManagement.args.continuelive:
        method = self.start
        args = True  # passed through to self.start(restart=True)
    if ResourceManagement.args.speed:
        self.clock = ReplayClock(ResourceManagement.args.speed, endCallback=method, endCallbackArgs=args)
    else:
        self.clock = ReplayClock(endCallback=method)
    # both --start and --end are mandatory for replay; the else branch below
    # raises, so startDate/endDate are always defined when used later
    if ResourceManagement.args.end and ResourceManagement.args.start:
        try:
            startDate = datetime.datetime.strptime(ResourceManagement.args.start, ReplayClock.parserformat)
            endDate = datetime.datetime.strptime(ResourceManagement.args.end, ReplayClock.parserformat)
            # for w in self.wrappers:
            # w.setTimeframe(startDate, endDate)
            # tolerate swapped arguments instead of failing
            if startDate > endDate:
                L.w("start date after end date. Changing both")
                tmp = endDate
                endDate = startDate
                startDate = tmp
            self.clock.setTimeframe(startDate, endDate)
        except Exception as e:
            L.e("Problem parsing start- or end date:", str(e))
            raise e
    else:
        raise Exception("start- and enddate required for replay mode")
    if ResourceManagement.args.pt:
        from virtualisation.resourcemanagement.performancetestreceiver import PerformanceMeterMinutes
        performancetest = PerformanceMeterMinutes()  # PerformanceMeterSeconds()
    # switch every wrapper into replay mode on the shared clock and start it
    for w in self.wrappers:
        w.setReplayMode(True)
        w.setClock(self.clock)
        w.setTimeframe(startDate, endDate)
        w.addReceiver(self)
        if ResourceManagement.args.pt:
            w.addReceiver(performancetest)
        w.start()
        w.runReplay()
    if not self.args.noQuality:
        # reuse the existing quality tracker if there is one
        if not self.averageStreamQuality:
            self.averageStreamQuality = AverageStreamQuality(self, self.clock)
        else:
            self.averageStreamQuality.setClock(self.clock)
    self.clock.runAsync()
    self.startMonitor()
    if not ResourceManagement.args.continuelive:
        raw_input("press Enter to end.\n")
        self.clock.stop()
        L.i("Runtime", datetime.datetime.now() - start_time)
示例13: update
# 需要導入模塊: from virtualisation.misc.log import Log [as 別名]
# 或者: from virtualisation.misc.log.Log import i [as 別名]
def update(self):
from virtualisation.resourcemanagement.resourcemanagement import ResourceManagement
# print "time", self.clock.now()
latStart = datetime.now()
L.d("processing:", self.getSensorDescription().sensorID)
# L.d(self.clock.now())
if self.replaymode:
self.stats.startMeasurement("Update_replay")
# self.clock.pause()
if self.historyreader:
L.d2("abstractwrapper get data")
self.stats.startMeasurement("Update_replay.Historyreader")
data_raw = self.historyreader.tick(self.clock)
self.stats.stopMeasurement("Update_replay.Historyreader")
L.d2("abstractwrapper received data:", str(data_raw))
if data_raw:
data_list = [data_raw] if not self.historyreader.multiple_observations else data_raw
for data in data_list:
try:
L.d2("abstractwrapper parse data")
# print "data to parse", data
self.stats.startMeasurement("Update_replay.Historyparser")
parsed = self.historyparser.parse(data, self.clock)
self.stats.stopMeasurement("Update_replay.Historyparser")
L.d2("abstractwrapper parsed data:", str(parsed))
del data
if parsed:
self.stats.startMeasurement("Update_replay.Preparation")
ObservationIDGenerator.addObservationIDToFields(parsed)
parsed.producedInReplayMode = True
parsed.recovered = False
parsed.latency = (datetime.now() - latStart).total_seconds()
self.stats.stopMeasurement("Update_replay.Preparation")
# QoI Start
quality = None
if self.qoiSystem:
L.d2("abstractwrapper get quality")
self.stats.startMeasurement("Update_replay.Quality")
quality = self.qoiSystem.addData(self.getSensorDescription(), parsed, self.clock)
self.stats.stopMeasurement("Update_replay.Quality")
L.d2("abstractwrapper quality:", quality)
if self.faultRecoveryActive:
L.d2("abstractwrapper update fault recovery")
self.stats.startMeasurement("Update_replay.FaultRecoveryUpdate")
self.updateFaultRecoveries(parsed, quality)
self.stats.stopMeasurement("Update_replay.FaultRecoveryUpdate")
L.d2("abstractwrapper fault recovery updated")
self.stats.startMeasurement("Update_replay.Receiver")
for r in self.receiver:
L.d2("abstractwrapper start receiver", r)
r.receive(parsed, self.getSensorDescription(), self.clock, quality)
L.d2("abstractwrapper receiver", r, "finished")
self.stats.stopMeasurement("Update_replay.Receiver")
except Exception as e:
L.e("Error while updating sensor", self.getSensorDescription().fullSensorID, e)
finally:
if ResourceManagement.args.gentle:
self.clock.sleep()
else:
L.d("there is no data, ask fault recovery1")
# L.i(self.getSensorDescription().sensorID)
# L.i(self.clock.now())
try:
self.stats.startMeasurement("Update_replay.Recovery")
data = JSONObject()
data.latency = 0
data.producedInReplayMode = True
data.recovered = True
data.fields = []
for n in self.getSensorDescription().fields:
if n in self.faultRecoveries and self.faultRecoveries[n].isReady():
data.fields.append(n)
data[n] = JSONObject()
# at this point the dataType is in FAULT_RECOVERY_SUPPORTED_DATATYPES and we can safely use cast
data[n].value = self.faultRecoveryCast(
self.faultRecoveries[n].getEstimation(),
self.getSensorDescription().field[n].dataType,
)
data[n].propertyName = self.getSensorDescription().field[n].propertyName
data[n].propertyURI = self.getSensorDescription().field[n].propertyURI
if "unit" in self.getSensorDescription().field[n]:
data[n].unit = self.getSensorDescription().field[n].unit
data[n].sensorID = self.getSensorDescription().fullSensorID
data[n].observationSamplingTime = self.clock.timeAsString()
data[n].observationResultTime = data[n].observationSamplingTime
self.stats.stopMeasurement("Update_replay.Recovery")
self.stats.startMeasurement("Update_replay.ObservationIDGenerator")
ObservationIDGenerator.addObservationIDToFields(data)
self.stats.stopMeasurement("Update_replay.ObservationIDGenerator")
quality = None
if self.qoiSystem:
self.stats.startMeasurement("Update_replay.Quality")
quality = self.qoiSystem.addData(self.getSensorDescription(), data, self.clock)
self.stats.stopMeasurement("Update_replay.Quality")
#.........這裏部分代碼省略.........