This article collects typical usage examples of the Python function volttron.platform.agent.utils.format_timestamp. If you are wondering how format_timestamp is used in practice, what it does, or what real calling code looks like, the curated examples below should help.
The following shows 15 code examples of the format_timestamp function, sorted by popularity by default.
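As a quick orientation before the examples: format_timestamp takes a datetime and returns an ISO 8601 string, and the examples below typically pair it with get_aware_utc_now() to stamp pubsub headers. A minimal sketch of that pattern follows (the 'Date' header key is taken from the examples below; the value in the comment is illustrative only):

from volttron.platform.agent.utils import format_timestamp, get_aware_utc_now

# Format the current timezone-aware UTC time as an ISO 8601 string,
# e.g. '2016-03-02T00:00:00.000000+00:00' (illustrative value).
now_str = format_timestamp(get_aware_utc_now())

# Stamp outgoing pubsub messages with that string, as most examples below do
# (several of them use the headers_mod.DATE / headers_mod.TIMESTAMP constants
# instead of the literal key).
headers = {'Date': now_str}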
Example 1: publish_target_info_pgne
def publish_target_info_pgne(self, cur_analysis_time_utc):
    cur_analysis_time_utc = parser.parse(cur_analysis_time_utc)

    target_messages = self.get_target_info_pgne(format_timestamp(cur_analysis_time_utc), 'UTC')
    if len(target_messages) > 0:
        target_topic = '/'.join(['analysis', 'target_agent', self.site, self.building, 'goal'])
        for target_message in target_messages:
            headers = {'Date': format_timestamp(get_aware_utc_now())}
            self.vip.pubsub.publish(
                'pubsub', target_topic, headers, target_message).get(timeout=15)
            _log.debug("TargetAgent {topic}: {value}".format(
                topic=target_topic,
                value=target_message))
            gevent.sleep(2)

    # Schedule next run at min 30 of next hour only if current min >= 30
    one_hour = timedelta(hours=1)
    cur_min = cur_analysis_time_utc.minute
    next_analysis_time = cur_analysis_time_utc.replace(minute=30,
                                                       second=0,
                                                       microsecond=0)
    if cur_min >= 30:
        next_analysis_time += one_hour

    next_run_time = next_analysis_time
    if self.dr_mode == 'dev':
        next_run_time = get_aware_utc_now() + timedelta(seconds=15)

    if self.dr_mode != 'manual':
        self.core.schedule(next_run_time, self.publish_target_info,
                           format_timestamp(next_analysis_time))
Example 2: on_ilc_start
def on_ilc_start(self, peer, sender, bus, topic, headers, message):
    cur_time = self.local_tz.localize(datetime.now())
    cur_time_utc = cur_time.astimezone(pytz.utc)

    one_hour = timedelta(hours=1)
    prev_time_utc = cur_time_utc - one_hour
    self.publish_target_info(format_timestamp(prev_time_utc))
    self.publish_target_info(format_timestamp(cur_time_utc))
Example 3: test_record_topic
def test_record_topic(publish_agent, query_agent):
    """
    Test that a record-topic message is forwarded to the historian running on
    another instance.

    :param publish_agent: Fake agent used to publish messages to the bus in
        volttron_instance1. Calling this fixture makes sure all the dependent
        fixtures are called to set up and start volttron_instance1 and the
        forwarder agent, and returns the fake agent instance used to publish.
    :param query_agent: Fake agent used to query the sqlhistorian in
        volttron_instance2. Calling this fixture makes sure all the dependent
        fixtures are called to set up and start volttron_instance2 and the
        sqlhistorian agent, and returns the fake agent instance used to query
        the historian.
    """
    # Create timestamp
    print("\n** test_record_topic **")
    now = utils.format_timestamp(datetime.utcnow())
    print("now is ", now)
    headers = {
        headers_mod.DATE: now,
        headers_mod.TIMESTAMP: now
    }
    # Publish messages
    publish(publish_agent, topics.RECORD, headers, 1)
    # sleep so that records get inserted with unique timestamps
    gevent.sleep(0.5)
    time2 = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.DATE: time2,
        headers_mod.TIMESTAMP: time2
    }
    publish(publish_agent, topics.RECORD, headers, 'value0')
    # sleep so that records get inserted with unique timestamps
    gevent.sleep(0.5)
    time3 = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.DATE: time3,
        headers_mod.TIMESTAMP: time3
    }
    publish(publish_agent, topics.RECORD, headers, {'key': 'value'})
    gevent.sleep(0.5)

    result = query_agent.vip.rpc.call('platform.historian',
                                      'query',
                                      topic=topics.RECORD,
                                      start=now,
                                      order="FIRST_TO_LAST").get(timeout=10)
    print('Query Result', result)
    assert (len(result['values']) == 3)
    assert (result['values'][0][1] == 1)
    assert (result['values'][1][1] == 'value0')
    assert (result['values'][2][1] == {'key': 'value'})
    assert result['values'][2][0] == time3 + '+00:00'
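A note on the last assertion above: datetime.utcnow() is timezone-naive, so format_timestamp produces a string without a UTC offset, while the historian returns the stored timestamp with an explicit +00:00 suffix, hence the time3 + '+00:00' comparison. A minimal sketch of the difference (assuming format_timestamp only appends an offset for timezone-aware datetimes; the output values are illustrative):

from datetime import datetime
from volttron.platform.agent import utils

naive = utils.format_timestamp(datetime.utcnow())
# e.g. '2016-03-02T00:00:00.000000'        -- naive datetime, no offset
aware = utils.format_timestamp(utils.get_aware_utc_now())
# e.g. '2016-03-02T00:00:00.000000+00:00'  -- aware datetime, offset appended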
Example 4: validate_cache_result_forecast
def validate_cache_result_forecast(locations, api_result, cache_result):
    for result in api_result:
        time_in_results = False
        for cr in cache_result:
            if utils.format_timestamp(cr[2]) == result["generation_time"]:
                for record in result["weather_results"]:
                    if utils.format_timestamp(cr[3]).startswith(record[0]):
                        time_in_results = True
                        assert ujson.loads(cr[1]) in locations
                        assert record[1] == ujson.loads(cr[4])
                        break
        assert time_in_results
Example 5: new_data
def new_data(self, peer, sender, bus, topic, headers, message):
    """
    Call back method for curtailable device data subscription.
    :param peer:
    :param sender:
    :param bus:
    :param topic:
    :param headers:
    :param message:
    :return:
    """
    if self.kill_signal_received:
        return
    _log.info("Data Received for {}".format(topic))

    # topic of form: devices/campus/building/device
    device_name = self.device_topic_map[topic]

    data = message[0]
    meta = message[1]
    now = parser.parse(headers["Date"])
    current_time_str = format_timestamp(now)
    parsed_data = parse_sympy(data)

    subdevices = self.curtailment.get_device(device_name).command_status.keys()
    for subdevice in subdevices:
        status = self.curtailment.get_device(device_name).currently_curtailed[subdevice]
        _log.debug("Device: {} -- subdevice: {} -- status: {}".format(device_name, subdevice, status))
        self.criteria.get_device(device_name[0]).criteria_status(subdevice, status)

    self.criteria.get_device(device_name[0]).ingest_data(now, parsed_data)
    self.curtailment.get_device(device_name).ingest_data(parsed_data)
    self.create_device_status_publish(current_time_str, device_name, data, topic, meta)
Example 6: check_load
def check_load(self, bldg_power, current_time):
    """
    Check whole-building power; if the value is above the demand limit
    (demand_limit), initiate the ILC (AHP) sequence.
    :param bldg_power:
    :param current_time:
    :return:
    """
    _log.debug("Checking building load.")

    if self.demand_limit is None:
        result = "Demand goal has not been set. Current load: ({load}) kW.".format(load=bldg_power)
    else:
        result = "Current load: ({load}) kW is below demand limit of {limit} kW.".format(load=bldg_power,
                                                                                          limit=self.demand_limit)

    if self.demand_limit is not None and bldg_power > self.demand_limit:
        result = "Current load of {} kW exceeds demand limit of {} kW.".format(bldg_power, self.demand_limit)
        scored_devices = self.criteria.get_score_order()
        on_devices = self.curtailment.get_on_devices()

        score_order = [device for scored in scored_devices for device in on_devices if scored in [(device[0], device[1])]]

        _log.debug("Scored devices: {}".format(scored_devices))
        _log.debug("On devices: {}".format(on_devices))
        _log.debug("Scored and on devices: {}".format(score_order))

        if not score_order:
            _log.info("All devices are off, nothing to curtail.")
            return

        self.device_group_size = None
        scored_devices = self.actuator_request(score_order)
        self.curtail(scored_devices, bldg_power, current_time)

    self.create_application_status(format_timestamp(current_time), result)
Example 7: update_values
def update_values(self, data, topic_name, start, end, values):
    if start.tzinfo:
        data[0] = data[0].replace(tzinfo=tzutc())
    if data[0] >= start and data[0] < end:
        result_value = self.json_string_to_dict(data[1])
        values[topic_name].append(
            (utils.format_timestamp(data[0]), result_value))
Example 8: _on_platform_log_message
def _on_platform_log_message(self, peer, sender, bus, topic, headers,
                             message):
    """ Receive message from a registered platform.

    This method is called with stats from the registered platform agents.
    """
    _log.debug('Got topic: {}'.format(topic))
    _log.debug('Got message: {}'.format(message))

    topicsplit = topic.split('/')
    platform_uuid = topicsplit[2]

    # For devices we use everything between devices/../all as a unique
    # key for determining the last time it was seen.
    key = '/'.join(topicsplit[:])
    _log.debug("key is: {}".format(key))
    uuid = topicsplit[2]

    point_list = []
    for point, item in message.iteritems():
        point_list.append(point)

    stats = {
        'topic': key,
        'points': point_list,
        'last_published_utc': format_timestamp(get_aware_utc_now())
    }

    self._registry.update_performance(platform_uuid=platform_uuid,
                                      performance=stats)
Example 9: read_callback
def read_callback(self, results):
    # XXX: Does a warning need to be printed?
    if results is None:
        return

    now = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
        headers_mod.DATE: now,
        headers_mod.TIMESTAMP: now
    }

    for point, value in results.iteritems():
        if isinstance(value, bool):
            value = int(value)
        self.add('/' + point, value)

    try:
        for point, value in results.iteritems():
            if isinstance(value, bool):
                value = int(value)
            depth, breadth = self.get_paths_for_point('/' + point)
            self.publish_json(depth, headers, value, self.meta_data[point], flags=zmq.NOBLOCK)
            self.publish_json(breadth, headers, value, self.meta_data[point], flags=zmq.NOBLOCK)

        self.publish_json(self.all_path_depth, headers, results, self.meta_data, flags=zmq.NOBLOCK)
        self.publish_json(self.all_path_breadth, headers, results, self.meta_data, flags=zmq.NOBLOCK)
    except zmq.error.Again:
        print("Warning: platform not running, topics not published. "
              "(Data to smap historian is unaffected by this warning)")
Example 10: publish_all
def publish_all(self, observation, topic_prefix="weather", headers={}):
    utcnow = utils.get_aware_utc_now()
    utcnow_string = utils.format_timestamp(utcnow)
    headers.update({HEADER_NAME_DATE: utcnow_string,
                    headers_mod.TIMESTAMP: utcnow_string})
    self.publish_subtopic(self.build_dictionary(observation),
                          topic_prefix, headers)
Example 11: _set_override_off
def _set_override_off(self, pattern):
    pattern = pattern.lower()
    # If pattern exactly matches
    if pattern in self._override_patterns:
        self._override_patterns.discard(pattern)
        # Cancel any pending override events
        self._cancel_override_events(pattern)
        self._override_devices.clear()
        patterns = dict()
        # Build override devices list again
        for pat in self._override_patterns:
            for device in self.instances:
                device = device.lower()
                if fnmatch.fnmatch(device, pat):
                    self._override_devices.add(device)

            if self._override_interval_events[pat] is None:
                patterns[pat] = str(0.0)
            else:
                evt, end_time = self._override_interval_events[pat]
                patterns[pat] = utils.format_timestamp(end_time)

        self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
    else:
        _log.error("Override Pattern did not match!")
        raise OverrideError(
            "Pattern {} does not exist in list of override patterns".format(pattern))
Example 12: _on_device_message
def _on_device_message(self, peer, sender, bus, topic, headers, message):
    # only deal with agents that have not been forwarded.
    if headers.get('X-Forwarded', None):
        return

    # only listen to the ending all message.
    if not re.match('.*/all$', topic):
        return

    topicsplit = topic.split('/')

    # For devices we use everything between devices/../all as a unique
    # key for determining the last time it was seen.
    key = '/'.join(topicsplit[1: -1])

    anon_topic = self._topic_replace_map[key]
    if not anon_topic:
        anon_topic = key
        for sr in self._topic_replace_list:
            _log.debug(
                'anon replacing {}->{}'.format(sr['from'], sr['to']))
            anon_topic = anon_topic.replace(sr['from'],
                                            sr['to'])
            _log.debug('anon after replacing {}'.format(anon_topic))
        _log.debug('Anon topic is: {}'.format(anon_topic))
        self._topic_replace_map[key] = anon_topic

    _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
    self._devices[anon_topic] = {
        'points': message[0].keys(),
        'last_published_utc': format_timestamp(get_aware_utc_now())
    }
Example 13: publish_baseline
def publish_baseline(self, df, cur_time):
    """
    This method is obsolete. Keep here for reference only.
    """
    topic_tmpl = "analysis/PGnE/{campus}/{building}/"
    topic_prefix = topic_tmpl.format(campus=self.site,
                                     building=self.building)
    headers = {'Date': format_timestamp(cur_time)}
    last_idx = len(df.index) - 1
    sec_last_idx = last_idx - 1

    # avg 10 day
    topic1 = topic_prefix + "avg10"
    value1 = df['pow_avg'][last_idx]
    # adj avg 10 day
    topic2 = topic_prefix + "adj_avg10"
    value_hr1 = df['pow_adj_avg'][sec_last_idx]
    value_hr2 = df['pow_adj_avg'][last_idx]
    # avg 5 hottest in 10 day
    topic3 = topic_prefix + "hot5_avg10"
    value3 = df['hot5_pow_avg'][last_idx]
    # adj avg 5 hottest in 10 day
    topic4 = topic_prefix + "hot5_adj_avg10"
    value4 = df['hot5_pow_adj_avg'][last_idx]

    # publish to message bus: only 10 day adjustment
    meta = {'type': 'float', 'tz': self.tz, 'units': 'kW'}
    msg = [{
        "value_hr1": value_hr1,
        "value_hr2": value_hr2
    }, {
        "value_hr1": meta,
        "value_hr2": meta
    }]
    self.vip.pubsub.publish(
        'pubsub', topic2, headers, msg).get(timeout=10)
Example 14: _set_override_off
def _set_override_off(self, pattern):
    """Turn off the override condition on all devices matching the pattern. It removes the pattern from the
    override patterns set, clears the list of overridden devices and reevaluates the state of devices. It then
    cancels the pending override event and removes the pattern from the config store.
    :param pattern: Override pattern to be removed.
    :type pattern: str
    """
    pattern = pattern.lower()
    # If pattern exactly matches
    if pattern in self._override_patterns:
        self._override_patterns.discard(pattern)
        # Cancel any pending override events
        self._cancel_override_events(pattern)
        self._override_devices.clear()
        patterns = dict()
        # Build override devices list again
        for pat in self._override_patterns:
            for device in self.instances:
                device = device.lower()
                if fnmatch.fnmatch(device, pat):
                    self._override_devices.add(device)

            if self._override_interval_events[pat] is None:
                patterns[pat] = str(0.0)
            else:
                evt, end_time = self._override_interval_events[pat]
                patterns[pat] = utils.format_timestamp(end_time)

        self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
    else:
        _log.error("Override Pattern did not match!")
        raise OverrideError(
            "Pattern {} does not exist in list of override patterns".format(pattern))
Example 15: on_polling
def on_polling(self):
    if self.zip is None and (self.region is None or self.city is None):
        return

    kwargs = {}
    if self.zip is not None:
        kwargs['zip'] = self.zip
        topic = 'weather2/polling/current/ZIP/{zip}/all'.format(zip=self.zip)
    else:
        kwargs['region'] = self.region
        kwargs['city'] = self.city
        topic = 'weather2/polling/current/{region}/{city}/all'.format(
            region=self.region,
            city=self.city
        )

    wu_resp = self.wu_service.current(**kwargs)
    publish_items = self.build_resp_current(wu_resp)

    if len(publish_items) > 0:
        headers = {
            HEADER_NAME_DATE: format_timestamp(utils.get_aware_utc_now()),
            HEADER_NAME_CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON
        }
        self.vip.pubsub.publish(peer='pubsub',
                                topic=topic,
                                message=publish_items,
                                headers=headers)
        _log.debug(publish_items)