This article collects typical usage examples of the Python function volttron.platform.agent.utils.get_aware_utc_now. If you have been wondering what exactly get_aware_utc_now does, how to use it, or what real-world usage looks like, the curated code examples here may help.
Below are 15 code examples of the get_aware_utc_now function, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
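Before diving into the examples, a quick orientation: get_aware_utc_now() returns a timezone-aware datetime for the current UTC time, which can safely be compared with and subtracted from other aware datetimes. A minimal sketch of the behavior (an illustration of the idea, not the library source):

from datetime import datetime

import pytz


def aware_utc_now_sketch():
    # Attach the UTC tzinfo so the result is comparable with other
    # timezone-aware datetimes (naive vs. aware comparisons raise TypeError).
    return pytz.UTC.localize(datetime.utcnow())


print(aware_utc_now_sketch().isoformat())  # e.g. 2018-05-04T16:20:00.123456+00:00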
Example 1: test_agent_logs
def test_agent_logs(volttron_instance1, agent):
    """
    Test that the alert agent's start and stop times are logged correctly.
    :param volttron_instance1: instance in which the alert agent is running
    :param agent: fake agent used to make RPC calls to the alert agent
    """
    global alert_messages, db_connection, alert_uuid
    stop_t = get_aware_utc_now()
    volttron_instance1.stop_agent(alert_uuid)
    gevent.sleep(1)
    c = db_connection.cursor()
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time IS NOT NULL AND "
              "stop_time > '{}'".format(stop_t))
    r = c.fetchall()
    assert len(r) == 1
    start_t = get_aware_utc_now()
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(4)
    stop_t = get_aware_utc_now()
    volttron_instance1.stop_agent(alert_uuid)
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time > '{}' AND "
              "stop_time > '{}'".format(start_t, stop_t))
    r = c.fetchall()
    assert len(r) == 1
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(1)
Example 2: pre_aggr
def pre_aggr(self, cur_analysis_time):
    _log.debug("AfddAggrAgent: start aggregating result...")
    # Do aggregation at the beginning of the day, week, and month.
    if cur_analysis_time.minute == 0:  # start of hour
        if cur_analysis_time.hour == 0:  # start of day
            self.daily_aggr(cur_analysis_time)
            if cur_analysis_time.weekday() == 0:  # start of week (Monday)
                self.weekly_aggr(cur_analysis_time)
            if cur_analysis_time.day == 1:  # start of month
                self.monthly_aggr(cur_analysis_time)
    # Schedule for the next hour.
    next_analysis_time = cur_analysis_time.replace(minute=0,
                                                   second=0,
                                                   microsecond=0)
    next_analysis_time += timedelta(hours=1)
    cur_analysis_time_utc = cur_analysis_time.astimezone(pytz.utc)
    next_run_time_utc = cur_analysis_time_utc + timedelta(hours=1)
    # In manual mode, if the next run time is already in the past,
    # push it to 60s after now.
    if self.op_mode == 'manual':
        if next_run_time_utc < get_aware_utc_now():
            next_run_time_utc = get_aware_utc_now() + timedelta(seconds=60)
    self.core.schedule(next_run_time_utc, self.pre_aggr, next_analysis_time)
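The hour-boundary arithmetic above is worth noting: rather than adding minutes by hand, the code zeroes out the sub-hour fields with replace() and then adds a timedelta, which cannot overflow the minute field. A tiny standalone illustration of the same pattern:

from datetime import timedelta

from volttron.platform.agent.utils import get_aware_utc_now

now = get_aware_utc_now()
# Top of the next hour: clear minute/second/microsecond, then add one hour.
next_hour = now.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
print(next_hour.isoformat())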
Example 3: periodic_read
def periodic_read(self, now):
    # We compute the next scrape time ourselves rather than using
    # self.core.periodic, to prevent drift.
    next_scrape_time = now + datetime.timedelta(seconds=self.interval)
    # Sanity check `now`. This is specifically for when this is running
    # in a VM that gets suspended and then resumed. If we don't make
    # this check, a resumed VM will publish one event per minute of
    # time the VM was suspended for.
    test_now = utils.get_aware_utc_now()
    if test_now - next_scrape_time > datetime.timedelta(seconds=self.interval):
        next_scrape_time = self.find_starting_datetime(test_now)
    self.periodic_read_event = self.core.schedule(next_scrape_time, self.periodic_read, next_scrape_time)
    _log.debug("scraping device: " + self.device_name)
    try:
        results = self.interface.scrape_all()
    except Exception as ex:
        _log.error('Failed to scrape ' + self.device_name + ': ' + str(ex))
        return
    if results:
        utcnow_string = utils.format_timestamp(utils.get_aware_utc_now())
        headers = {headers_mod.DATE: utcnow_string,
                   headers_mod.TIMESTAMP: utcnow_string, }
        for point, value in results.iteritems():
            depth_first_topic, breadth_first_topic = self.get_paths_for_point(point)
            message = [value, self.meta_data[point]]
            self._publish_wrapper(depth_first_topic, headers=headers, message=message)
            self._publish_wrapper(breadth_first_topic, headers=headers, message=message)
        message = [results, self.meta_data]
        self._publish_wrapper(self.all_path_depth, headers=headers, message=message)
        self._publish_wrapper(self.all_path_breadth, headers=headers, message=message)
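The scheduling here embodies a drift-free polling pattern: the next scrape time is derived from the scheduled time, not from the wall clock at wake-up, so per-cycle latency does not accumulate. A standalone sketch of that pattern (the interval value and function name are hypothetical, not part of the driver):

import datetime

from volttron.platform.agent import utils

INTERVAL = 60  # seconds; hypothetical polling interval


def next_scrape(scheduled_for):
    # Advance from the *scheduled* time so late wake-ups don't shift the grid.
    nxt = scheduled_for + datetime.timedelta(seconds=INTERVAL)
    now = utils.get_aware_utc_now()
    # If we fell more than one interval behind (e.g. a suspended VM was
    # resumed), skip ahead instead of firing a burst of catch-up reads.
    if now - nxt > datetime.timedelta(seconds=INTERVAL):
        nxt = now + datetime.timedelta(seconds=INTERVAL)
    return nxt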
Example 4: publish_target_info_pgne
def publish_target_info_pgne(self, cur_analysis_time_utc):
    cur_analysis_time_utc = parser.parse(cur_analysis_time_utc)
    target_messages = self.get_target_info_pgne(format_timestamp(cur_analysis_time_utc), 'UTC')
    if len(target_messages) > 0:
        target_topic = '/'.join(['analysis', 'target_agent', self.site, self.building, 'goal'])
        for target_message in target_messages:
            headers = {'Date': format_timestamp(get_aware_utc_now())}
            self.vip.pubsub.publish(
                'pubsub', target_topic, headers, target_message).get(timeout=15)
            _log.debug("TargetAgent {topic}: {value}".format(
                topic=target_topic,
                value=target_message))
            gevent.sleep(2)
    # Schedule the next run at minute 30: of the next hour if the current
    # minute is already >= 30, otherwise of the current hour.
    one_hour = timedelta(hours=1)
    cur_min = cur_analysis_time_utc.minute
    next_analysis_time = cur_analysis_time_utc.replace(minute=30,
                                                       second=0,
                                                       microsecond=0)
    if cur_min >= 30:
        next_analysis_time += one_hour
    next_run_time = next_analysis_time
    if self.dr_mode == 'dev':
        next_run_time = get_aware_utc_now() + timedelta(seconds=15)
    if self.dr_mode != 'manual':
        self.core.schedule(next_run_time, self.publish_target_info,
                           format_timestamp(next_analysis_time))
Example 5: test_for_duplicate_logs
def test_for_duplicate_logs(volttron_instance1, agent, cleanup_db):
    """
    Test that records are not duplicated in the database after every watch
    time interval. When a topic is not seen within the configured time
    frame, a single row is inserted into the database for that topic. When
    the topic is seen again, the same row is updated with the timestamp of
    when the topic message was seen.
    :param volttron_instance1: instance in which the alert agent is running
    :param agent: fake agent used to make RPC calls to the alert agent
    :param cleanup_db: function-scope fixture to clean up the alert and
        agent log tables in the database.
    """
    global db_connection, alert_messages, alert_uuid
    volttron_instance1.stop_agent(alert_uuid)
    gevent.sleep(1)
    start_t = get_aware_utc_now()
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results)
    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results) == 3
    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{'point': 'value'}])
    gevent.sleep(2)
    c = db_connection.cursor()
    c.execute('SELECT topic, last_seen_before_timeout, '
              'first_seen_after_timeout FROM topic_log ')
    results = c.fetchall()
    assert len(results) == 3
    for r in results:
        assert r[1] is None
        non_utc = publish_time.replace(tzinfo=None)
        assert r[2] >= non_utc
Example 6: _on_device_message
def _on_device_message(self, peer, sender, bus, topic, headers, message):
    # Only deal with messages that have not been forwarded.
    if headers.get('X-Forwarded', None):
        return
    # Only listen to the trailing "all" message.
    if not re.match('.*/all$', topic):
        return
    topicsplit = topic.split('/')
    # For devices we use everything between devices/.../all as a unique
    # key for determining the last time the device was seen.
    key = '/'.join(topicsplit[1:-1])
    anon_topic = self._topic_replace_map[key]
    if not anon_topic:
        anon_topic = key
        for sr in self._topic_replace_list:
            _log.debug(
                'anon replacing {}->{}'.format(sr['from'], sr['to']))
            anon_topic = anon_topic.replace(sr['from'],
                                            sr['to'])
        _log.debug('anon after replacing {}'.format(anon_topic))
        _log.debug('Anon topic is: {}'.format(anon_topic))
        self._topic_replace_map[key] = anon_topic
    _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
    self._devices[anon_topic] = {
        'points': message[0].keys(),
        'last_published_utc': format_timestamp(get_aware_utc_now())
    }
Example 7: backup_new_data
def backup_new_data(self, new_publish_list):
    """
    :param new_publish_list: A list of records to cache to disk.
    :type new_publish_list: list
    """
    _log.debug("Backing up unpublished values.")
    c = self._connection.cursor()
    if self._backup_storage_limit_gb is not None:
        def page_count():
            c.execute("PRAGMA page_count")
            return c.fetchone()[0]

        while page_count() >= self.max_pages:
            self._owner().vip.pubsub.publish('pubsub', 'backupdb/nomore')
            c.execute(
                '''DELETE FROM outstanding
                   WHERE ROWID IN
                   (SELECT ROWID FROM outstanding
                    ORDER BY ROWID ASC LIMIT 100)''')
    for item in new_publish_list:
        source = item['source']
        topic = item['topic']
        meta = item.get('meta', {})
        values = item['readings']
        topic_id = self._backup_cache.get(topic)
        if topic_id is None:
            c.execute('''INSERT INTO topics values (?,?)''',
                      (None, topic))
            c.execute('''SELECT last_insert_rowid()''')
            row = c.fetchone()
            topic_id = row[0]
            self._backup_cache[topic_id] = topic
            self._backup_cache[topic] = topic_id
        meta_dict = self._meta_data[(source, topic_id)]
        for name, value in meta.iteritems():
            current_meta_value = meta_dict.get(name)
            if current_meta_value != value:
                c.execute('''INSERT OR REPLACE INTO metadata
                             values(?, ?, ?, ?)''',
                          (source, topic_id, name, value))
                meta_dict[name] = value
        for timestamp, value in values:
            if timestamp is None:
                timestamp = get_aware_utc_now()
            _log.debug("Inserting into outstanding table with timestamp "
                       "{}".format(timestamp))
            c.execute(
                '''INSERT OR REPLACE INTO outstanding
                   values(NULL, ?, ?, ?, ?)''',
                (timestamp, source, topic_id, jsonapi.dumps(value)))
    self._connection.commit()
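The method above assumes the topics, metadata, and outstanding tables already exist (they are created elsewhere in the historian base class). A plausible schema consistent with the statements it executes, shown only as an illustrative assumption rather than the actual DDL:

import sqlite3

conn = sqlite3.connect('backup.sqlite')
c = conn.cursor()
# Two-column topics table: inserting NULL lets SQLite assign the rowid
# that backup_new_data reads back with last_insert_rowid().
c.execute('''CREATE TABLE IF NOT EXISTS topics
             (topic_id INTEGER PRIMARY KEY, topic_name TEXT UNIQUE)''')
# Four-column metadata table keyed so INSERT OR REPLACE updates in place.
c.execute('''CREATE TABLE IF NOT EXISTS metadata
             (source TEXT, topic_id INTEGER, name TEXT, value TEXT,
              UNIQUE(source, topic_id, name))''')
# Five-column outstanding table; the leading NULL in the INSERT fills the id.
c.execute('''CREATE TABLE IF NOT EXISTS outstanding
             (id INTEGER PRIMARY KEY, ts TIMESTAMP, source TEXT,
              topic_id INTEGER, value_string TEXT,
              UNIQUE(ts, topic_id))''')
conn.commit()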
Example 8: _collect_key
def _collect_key(self, web_address):
    """
    Try to get the (server key, instance name, vip-address) of the remote
    instance and send it to RoutingService to connect to the remote
    instance. If unsuccessful, try again later.
    :param web_address: web address of the remote instance
    :return:
    """
    platform_info = dict()
    try:
        platform_info = self._get_platform_discovery(web_address)
        with self._ext_addresses_store_lock:
            _log.debug("Platform discovery info: {}".format(platform_info))
            name = platform_info['instance-name']
            self._ext_addresses_store[name] = platform_info
            self._ext_addresses_store.async_sync()
    except KeyError as exc:
        _log.error("Discovery info does not contain instance name {}".format(exc))
    except DiscoveryError:
        # On a discovery error, try again later.
        sec = random.random() * self.r + 30
        delay = utils.get_aware_utc_now() + timedelta(seconds=sec)
        grnlet = self.core.schedule(delay, self._collect_key, web_address)
    except ConnectionError as e:
        _log.error("HTTP connection error {}".format(e))
    # If platform discovery is successful, send the info to RoutingService
    # to establish a connection with the remote platform.
    if platform_info:
        op = b'setupmode_platform_connection'
        connection_settings = dict(platform_info)
        connection_settings['web-address'] = web_address
        self._send_to_router(op, connection_settings)
Example 9: on_polling
def on_polling(self):
    if self.zip is None and (self.region is None or self.city is None):
        return
    kwargs = {}
    if self.zip is not None:
        kwargs['zip'] = self.zip
        topic = 'weather2/polling/current/ZIP/{zip}/all'.format(zip=self.zip)
    else:
        kwargs['region'] = self.region
        kwargs['city'] = self.city
        topic = 'weather2/polling/current/{region}/{city}/all'.format(
            region=self.region,
            city=self.city
        )
    wu_resp = self.wu_service.current(**kwargs)
    publish_items = self.build_resp_current(wu_resp)
    if len(publish_items) > 0:
        headers = {
            HEADER_NAME_DATE: format_timestamp(utils.get_aware_utc_now()),
            HEADER_NAME_CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON
        }
        self.vip.pubsub.publish(peer='pubsub',
                                topic=topic,
                                message=publish_items,
                                headers=headers)
        _log.debug(publish_items)
Example 10: build_metadata_oadr_report
def build_metadata_oadr_report(self, report):
    descriptions = []
    for tel_vals in json.loads(report.telemetry_parameters).values():
        # Rule 305: For TELEMETRY_USAGE reports, units in
        # reportDescription.itemBase should be powerReal.
        if tel_vals['units'] == 'powerReal':
            item_base = oadr_20b.PowerRealType(itemDescription='RealPower',
                                               itemUnits='W',
                                               siScaleCode=None,
                                               powerAttributes=None)
        else:
            item_base = None
        min_freq, max_freq = tel_vals['min_frequency'], tel_vals['max_frequency']
        desc = oadr_20b.oadrReportDescriptionType(rID=tel_vals['r_id'],
                                                  reportType=tel_vals['report_type'],
                                                  readingType=tel_vals['reading_type'],
                                                  itemBase=item_base,
                                                  oadrSamplingRate=self.build_sampling_rate(min_freq, max_freq))
        descriptions.append(desc)
    rpt_interval_duration = isodate.duration_isoformat(timedelta(seconds=report.interval_secs))
    return oadr_20b.oadrReportType(duration=oadr_20b.DurationPropType(rpt_interval_duration),
                                   oadrReportDescription=descriptions,
                                   reportRequestID=None,
                                   reportSpecifierID=report.report_specifier_id,
                                   reportName=report.name,
                                   createdDateTime=utils.get_aware_utc_now())
Example 11: _capture_data
def _capture_data(self, peer, sender, bus, topic, headers, message,
                  device):
    timestamp_string = headers.get(headers_mod.DATE, None)
    timestamp = get_aware_utc_now()
    if timestamp_string is not None:
        timestamp, my_tz = process_timestamp(timestamp_string, topic)
    try:
        # The 2.0 agent compatibility layer makes sender == pubsub.compat,
        # so we can do the proper thing when it is present.
        message = self._clean_compat(sender, topic, headers, message)
    except Exception as e:
        _log.exception(e)
        return
    try:
        if isinstance(message, dict):
            values = message
        else:
            values = message[0]
    except Exception as e:
        _log.exception(e)
        return
    if topic.startswith('analysis'):
        source = 'analysis'
    else:
        source = 'scrape'
    _log.debug(
        "Queuing {topic} from {source} for publish".format(topic=topic,
                                                           source=source))
    for key, value in values.iteritems():
        self._add_to_cache(device, key, value)
Example 12: get_devices
def get_devices(self):
    cp = deepcopy(self._devices)
    foundbad = False
    for k, v in cp.items():
        dt = parse_timestamp_string(v['last_published_utc'])
        dtnow = get_aware_utc_now()
        if dt + datetime.timedelta(minutes=5) < dtnow:
            v['health'] = Status.build(
                BAD_STATUS,
                'Too long between publishes for {}'.format(k)).as_dict()
            foundbad = True
        else:
            v['health'] = Status.build(GOOD_STATUS).as_dict()
    if len(cp):
        if foundbad:
            self.vip.health.set_status(
                BAD_STATUS,
                'At least one device has not published in 5 minutes')
        else:
            self.vip.health.set_status(
                GOOD_STATUS,
                'All devices publishing normally.'
            )
    return cp
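The staleness test above is a self-contained pattern: publish times are stored as formatted strings, then parsed back and compared against an aware "now". A condensed sketch of just that check:

import datetime

from volttron.platform.agent.utils import (format_timestamp,
                                           get_aware_utc_now,
                                           parse_timestamp_string)

last_published = format_timestamp(get_aware_utc_now())  # stored as a string

# Later: has the device gone quiet for more than five minutes?
dt = parse_timestamp_string(last_published)
stale = dt + datetime.timedelta(minutes=5) < get_aware_utc_now()
print('stale' if stale else 'healthy')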
Example 13: update_override_patterns
def update_override_patterns(self):
    if self._override_patterns is None:
        try:
            values = self.vip.config.get("override_patterns")
            values = jsonapi.loads(values)
            if isinstance(values, dict):
                self._override_patterns = set()
                for pattern, end_time in values.items():
                    # Check the end_time.
                    now = utils.get_aware_utc_now()
                    # If the end time is indefinite, set the override
                    # with an indefinite duration.
                    if end_time == "0.0":
                        self._set_override_on(pattern, 0.0, from_config_store=True)
                    else:
                        end_time = utils.parse_timestamp_string(end_time)
                        # If the end time is still in the future, set the
                        # override with the remaining duration.
                        if end_time > now:
                            delta = end_time - now
                            self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
            else:
                self._override_patterns = set()
        except KeyError:
            self._override_patterns = set()
        except ValueError:
            _log.error("override_patterns is not set correctly in the config store")
            self._override_patterns = set()
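For reference, the "override_patterns" entry this method consumes is a JSON object mapping a device-topic pattern to an end-time string, with "0.0" denoting an indefinite override. The values below are made up for illustration:

# Hypothetical config store payload for "override_patterns":
override_patterns = {
    "devices/campus/building1/*": "2018-06-01T12:00:00+00:00",  # timed override
    "devices/campus/building2/hvac1": "0.0",                    # indefinite
}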
Example 14: _update_override_interval
def _update_override_interval(self, interval, pattern):
    if interval <= 0.0:  # indicative of an indefinite duration
        if pattern in self._override_interval_events:
            # If the override duration is already indefinite, do nothing.
            if self._override_interval_events[pattern] is None:
                return False
            else:
                # Cancel the old event.
                evt = self._override_interval_events.pop(pattern)
                evt[0].cancel()
        self._override_interval_events[pattern] = None
        return True
    else:
        override_start = utils.get_aware_utc_now()
        override_end = override_start + timedelta(seconds=interval)
        if pattern in self._override_interval_events:
            evt = self._override_interval_events[pattern]
            # If the existing event is indefinite or ends after the new
            # end time, do nothing.
            if evt is None or override_end < evt[1]:
                return False
            else:
                evt = self._override_interval_events.pop(pattern)
                evt[0].cancel()
        # Schedule the new override event.
        event = self.core.schedule(override_end, self._cancel_override, pattern)
        self._override_interval_events[pattern] = (event, override_end)
        return True
Example 15: test_ignore_topic
def test_ignore_topic(agent):
    """
    Test the ignore_topic RPC call. When a topic is ignored, it should not
    appear in future alert messages.
    :param agent: fake agent used to make RPC calls to the alert agent
    """
    global alert_messages, db_connection
    agent.vip.rpc.call(PLATFORM_TOPIC_WATCHER, 'ignore_topic', 'group1',
                       'fakedevice2/all').get()
    alert_messages.clear()
    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{'point': 'value'}])
    print("Alert messages {}".format(alert_messages))
    gevent.sleep(7)
    assert len(alert_messages) == 1
    assert u"Topic(s) not published within time limit: ['fakedevice']" in \
        alert_messages
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout > "{}"'.format(publish_time))
    results = c.fetchall()
    topics = []
    assert results is not None
    assert len(results) == 1
    assert results[0][0] == u'fakedevice'
    assert results[0][2] is None