This article collects typical usage examples of the error function from the Python module pyon.public.log. If you have been wondering what the Python error function does, how to use it, or what real calls to error look like, then you are in luck: the curated code examples below may be just what you need.
The following presents 15 code examples of the error function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: validate_driver_configuration
def validate_driver_configuration(self, driver_config):
    """
    Driver config must include 'oms_uri' entry.
    """
    if 'oms_uri' not in driver_config:
        log.error("'oms_uri' not present in driver_config = %s", driver_config)
        raise PlatformDriverException(msg="driver_config does not indicate 'oms_uri'")
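Note how the example passes the config dict as a lazy %-style argument to log.error instead of pre-formatting the message, so interpolation only happens if the record is actually emitted. Below is a minimal standalone sketch of the same pattern, assuming only the standard logging module (the ValueError stands in for pyon's PlatformDriverException):

import logging

log = logging.getLogger(__name__)

def validate_config(driver_config):
    # The dict is interpolated by the logging framework only when the
    # ERROR level is enabled, avoiding wasted work on silenced loggers.
    if 'oms_uri' not in driver_config:
        log.error("'oms_uri' not present in driver_config = %s", driver_config)
        raise ValueError("driver_config does not indicate 'oms_uri'")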
Example 2: eval_alarm
def eval_alarm(alarm_def, x):
    """
    Evaluate an alarm definition against the value x; return the updated
    definition and event data describing any status change (or None).
    """
    alarm_def.current_val = x
    old_status = alarm_def.status
    alarm_def.status = eval(alarm_def.expr)

    event_data = None

    if old_status != alarm_def.status:
        event_data = {
            'name': alarm_def.name,
            'message': alarm_def.message,
            'expr': alarm_def.expr,
            'stream_name': alarm_def.stream_name,
            'value_id': alarm_def.value_id,
            'value': x
        }

        if not alarm_def.status:
            event_data['event_type'] = 'StreamAllClearAlarmEvent'
            event_data['message'] = 'The alarm %s has cleared.' % alarm_def.name
        elif alarm_def.type == StreamAlarmType.WARNING:
            event_data['event_type'] = 'StreamWarningAlarmEvent'
        elif alarm_def.type == StreamAlarmType.ALERT:
            event_data['event_type'] = 'StreamAlertAlarmEvent'
        else:
            log.error('Unknown alarm type.')
            event_data = None

    return (alarm_def, event_data)
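The fall-through else branch is worth noting: an unrecognized alarm type is reported with log.error and the result is neutralized to None rather than raising. A condensed sketch of that pattern, with illustrative stand-ins for the StreamAlarmType constants:

import logging

log = logging.getLogger(__name__)

WARNING, ALERT = 'WARNING', 'ALERT'  # illustrative stand-ins for StreamAlarmType

def event_type_for(alarm_type):
    # Unknown types are logged and mapped to None rather than raising,
    # mirroring the fall-through else branch in eval_alarm().
    if alarm_type == WARNING:
        return 'StreamWarningAlarmEvent'
    elif alarm_type == ALERT:
        return 'StreamAlertAlarmEvent'
    log.error('Unknown alarm type: %r', alarm_type)
    return None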
Example 3: verify
def verify(self, granules):
    """
    Verify the granules passed in against the result set read in the ctor.

    Ensure:
    - the granules verify as a set
    - individual granule data verifies

    Store the verification result in the object and return success or failure.
    @param granules: list of granules to verify.
    @return True if verification successful, False otherwise.
    """
    self._clear_report()
    result = True

    granule_data = self._extract_granule_data(granules)

    if self._verify_set(granule_data):
        result = self._verify_granules(granule_data)
    else:
        result = False

    if not result:
        log.error("Failed verification: \n%s", self.report())

    return result
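Rather than logging each failure as it happens, verify() accumulates findings and emits a single consolidated log.error with the full report. A sketch of that aggregate-then-log shape, assuming an illustrative list of (name, passed) pairs as input:

import logging

log = logging.getLogger(__name__)

def verify_all(checks):
    # checks: iterable of (name, passed) pairs -- illustrative input shape
    failures = [name for name, passed in checks if not passed]
    if failures:
        # one consolidated report is easier to scan than many single lines
        log.error("Failed verification: \n%s", "\n".join(failures))
    return not failures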
Example 4: insert_values
def insert_values(self, coverage, rdt, stream_id):
    np_dict = self.build_data_dict(rdt)

    if 'ingestion_timestamp' in coverage.list_parameters():
        # 2208988800 s shifts the Unix epoch (1970) to the NTP epoch (1900)
        timestamps = np.array([(time.time() + 2208988800) for i in rdt[rdt.temporal_parameter]])
        np_dict['ingestion_timestamp'] = NumpyParameterData('ingestion_timestamp', timestamps, rdt[rdt.temporal_parameter])

    # If it's sparse only
    if self.sparse_only(rdt):
        del np_dict[rdt.temporal_parameter]

    try:
        coverage.set_parameter_values(np_dict)
    except IOError as e:
        log.error("Couldn't insert values for coverage: %s",
                  coverage.persistence_dir, exc_info=True)
        try:
            coverage.close()
        finally:
            self._bad_coverages[stream_id] = 1
            raise CorruptionError(e.message)
    except KeyError as e:
        if 'has not been initialized' in e.message:
            coverage.refresh()
        raise
    except Exception as e:
        print repr(rdt)
        raise
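Note exc_info=True in the IOError handler: it attaches the active traceback to the log record without interrupting control flow, so the error can be logged before a domain-specific exception is raised. A minimal demonstration using only the standard logging module:

import logging

logging.basicConfig()
log = logging.getLogger(__name__)

try:
    open('/nonexistent/coverage')
except IOError:
    # exc_info=True appends the current traceback to this one record
    log.error("Couldn't insert values for coverage: %s",
              '/nonexistent/coverage', exc_info=True)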
Example 5: _create_mission_scheduler
def _create_mission_scheduler(self, mission_id, mission_yml):
    """
    @param mission_id
    @param mission_yml
    """
    log.debug('[mm] _create_mission_scheduler: mission_id=%r', mission_id)

    mission_loader = MissionLoader(self._agent)
    mission_loader.load_mission(mission_id, mission_yml)
    self._mission_entries = mission_loader.mission_entries

    log.debug('[mm] _create_mission_scheduler: _ia_clients=\n%s',
              self._agent._pp.pformat(self._agent._ia_clients))

    # get instrument IDs and clients for the valid running instruments:
    instruments = {}
    for (instrument_id, obj) in self._agent._ia_clients.iteritems():
        if isinstance(obj, dict):
            # it's a valid instrument
            if instrument_id != obj.resource_id:
                log.error('[mm] _create_mission_scheduler: instrument_id=%s, '
                          'resource_id=%s', instrument_id, obj.resource_id)
            instruments[obj.resource_id] = obj.ia_client

    mission_scheduler = MissionScheduler(self._agent,
                                         instruments,
                                         self._mission_entries)
    log.debug('[mm] _create_mission_scheduler: MissionScheduler created. entries=%s',
              self._mission_entries)
    return mission_scheduler
Example 6: persist_or_timeout
def persist_or_timeout(self, stream_id, rdt):
    """ Retry writing the coverage multiple times and eventually time out. """
    done = False
    timeout = 2
    start = time.time()
    while not done:
        try:
            self.add_granule(stream_id, rdt)
            done = True
        except:
            log.exception('An issue with coverage, retrying after a bit')
            if (time.time() - start) > MAX_RETRY_TIME:  # After an hour just give up
                dataset_id = self.get_dataset(stream_id)
                log.error("We're giving up, the coverage needs to be inspected %s",
                          DatasetManagementService._get_coverage_path(dataset_id))
                raise

            if stream_id in self._coverages:
                log.info('Popping coverage for stream %s', stream_id)
                self._coverages.pop(stream_id)

            gevent.sleep(timeout)
            if timeout > (60 * 5):
                timeout = 60 * 5
            else:
                timeout *= 2
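This loop combines an overall deadline (MAX_RETRY_TIME, an hour per the comment) with a per-retry delay that doubles up to a five-minute cap. The same backoff skeleton in isolation, with time.sleep standing in for gevent.sleep and an illustrative MAX_RETRY_TIME value:

import time
import logging

log = logging.getLogger(__name__)

MAX_RETRY_TIME = 60 * 60  # illustrative one-hour deadline

def retry_with_backoff(operation):
    timeout = 2
    start = time.time()
    while True:
        try:
            return operation()
        except Exception:
            log.exception('An issue with the operation, retrying after a bit')
            if (time.time() - start) > MAX_RETRY_TIME:
                log.error("We're giving up after %s seconds", MAX_RETRY_TIME)
                raise
            time.sleep(timeout)
            timeout = min(timeout * 2, 60 * 5)  # double the delay, cap at 5 min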
Example 7: _construct_stream_and_publisher
def _construct_stream_and_publisher(self, stream_name, stream_config):
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r: _construct_stream_and_publisher: "
                  "stream_name:%r, stream_config:\n%s",
                  self._platform_id, stream_name,
                  self._pp.pformat(stream_config))

    decoder = IonObjectDeserializer(obj_registry=get_obj_registry())

    if 'stream_def_dict' not in stream_config:
        # should not happen: PlatformAgent._validate_configuration validates this.
        log.error("'stream_def_dict' key not in configuration for stream %r", stream_name)
        return

    stream_def_dict = stream_config['stream_def_dict']
    stream_def_dict['type_'] = 'StreamDefinition'
    stream_def_obj = decoder.deserialize(stream_def_dict)
    self._stream_defs[stream_name] = stream_def_obj

    routing_key = stream_config['routing_key']
    stream_id = stream_config['stream_id']
    exchange_point = stream_config['exchange_point']
    parameter_dictionary = stream_def_dict['parameter_dictionary']
    log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)

    self._data_streams[stream_name] = stream_id
    self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
    stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
    publisher = self._create_publisher(stream_id, stream_route)
    self._data_publishers[stream_name] = publisher

    log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)
Example 8: add_granule
def add_granule(self, stream_id, granule):
    '''
    Appends the granule's data to the coverage and persists it.
    '''
    #--------------------------------------------------------------------------------
    # Coverage determination and appending
    #--------------------------------------------------------------------------------
    dataset_id = self.get_dataset(stream_id)
    if not dataset_id:
        log.error('No dataset could be determined on this stream: %s', stream_id)
        return

    coverage = self.get_coverage(stream_id)
    if not coverage:
        log.error('Could not persist coverage from granule, coverage is None')
        return

    #--------------------------------------------------------------------------------
    # Actual persistence
    #--------------------------------------------------------------------------------
    rdt = RecordDictionaryTool.load_from_granule(granule)
    elements = len(rdt)
    if not elements:
        return

    coverage.insert_timesteps(elements)
    start_index = coverage.num_timesteps - elements

    for k, v in rdt.iteritems():
        if k == 'image_obj':
            log.trace('%s:', k)
        else:
            log.trace('%s: %s', k, v)

        slice_ = slice(start_index, None)
        coverage.set_parameter_values(param_name=k, tdoa=slice_, value=v)

    coverage.flush()
Example 9: persist
def persist(self, dataset_granule):  # pragma: no cover
    '''
    Persists the dataset metadata
    '''
    #--------------------------------------------------------------------------------
    # There's a potential that the datastore could have been deleted while ingestion
    # is still running. Essentially this refreshes the state.
    #--------------------------------------------------------------------------------
    try:
        self.db.create_doc(dataset_granule)
        return
    except ResourceNotFound as e:
        log.error('The datastore was removed while ingesting (retrying)')
        self.db = self.container.datastore_manager.get_datastore(self.datastore_name, DataStore.DS_PROFILE.SCIDATA)

    #--------------------------------------------------------------------------------
    # The first call to create_doc attached an _id to the dictionary, which causes
    # an error to be raised. To make this more resilient, we ensure the dictionary
    # does not retain any of these excess keys.
    #--------------------------------------------------------------------------------
    try:
        if '_id' in dataset_granule:
            del dataset_granule['_id']
        if '_rev' in dataset_granule:
            del dataset_granule['_rev']
        self.db.create_doc(dataset_granule)
    except ResourceNotFound as e:
        log.error(e.message)  # Oh well, I tried
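The method retries exactly once: the first ResourceNotFound triggers a datastore refresh and a cleaned-up second attempt, and a second failure is merely logged. A generic sketch of that retry-once shape (the write and recover callables are illustrative stand-ins, and a plain Exception replaces ResourceNotFound):

import logging

log = logging.getLogger(__name__)

def persist_with_one_retry(write, recover):
    # write: callable performing the persistence; recover: callable that
    # re-acquires the resource. Both are illustrative stand-ins.
    try:
        write()
        return
    except Exception:
        log.error('The datastore was removed while ingesting (retrying)')
        recover()
    try:
        write()
    except Exception as e:
        log.error(str(e))  # a second failure is logged, not raised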
Example 10: _construct_packet_factories
def _construct_packet_factories(self):
    """
    Construct packet factories from the packet_config member of the
    driver_config.
    @retval None
    """
    packet_config = self._dvr_config['packet_config']
    for (name, val) in packet_config.iteritems():
        if val:
            mod = val[0]
            cls = val[1]
            import_str = 'from %s import %s' % (mod, cls)
            ctor_str = 'ctor = %s' % cls
            try:
                exec import_str
                exec ctor_str
            except Exception:
                log.error('Instrument agent %s had error creating packet factories from %s.%s',
                          self._proc_name, mod, cls)
            else:
                self._packet_factories[name] = ctor
                log.info('Instrument agent %s created packet factory for stream %s',
                         self._proc_name, name)
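The exec-based import above is Python 2 era style; the same dynamic lookup can be done without exec via importlib, keeping the log.error on failure. A sketch of that alternative (the module and class names passed in are illustrative):

import logging
import importlib

log = logging.getLogger(__name__)

def load_factory(mod, cls):
    # Equivalent dynamic import without exec: fetch the module, then the
    # class attribute; log with traceback and return None on failure.
    try:
        module = importlib.import_module(mod)
        return getattr(module, cls)
    except (ImportError, AttributeError):
        log.error('Error creating packet factory from %s.%s', mod, cls,
                  exc_info=True)
        return None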
Example 11: on_init
def on_init(self):
    if not EEAgentCore:
        msg = "EEAgentCore isn't available. Use autolaunch.cfg buildout"
        log.error(msg)
        self.heartbeat_thread = None
        return
    log.debug("ExecutionEngineAgent Pyon on_init")

    launch_type_name = self.CFG.eeagent.launch_type.name

    if not launch_type_name:
        # TODO: Fail fast here?
        log.error("No launch_type.name specified")

    self._factory = get_exe_factory(
        launch_type_name, self.CFG, pyon_container=self.container, log=log)

    # TODO: Allow other core class?
    self.core = EEAgentCore(self.CFG, self._factory, log)

    interval = float(self.CFG.eeagent.get('heartbeat', DEFAULT_HEARTBEAT))
    if interval > 0:
        self.heartbeater = HeartBeater(
            self.CFG, self._factory, self.resource_id, self, log=log)
        self.heartbeater.poll()
        self.heartbeat_thread, self._heartbeat_thread_event = looping_call(0.1, self.heartbeater.poll)
    else:
        self.heartbeat_thread = None
        self._heartbeat_thread_event = None
Example 12: assert_state_change
def assert_state_change(self, target_agent_state, timeout=10):
    """
    Verify the agent state changes as expected within the timeout.
    Fail if the state doesn't change to the expected state.
    @param target_agent_state: state we expect the agent to be in
    @param timeout: how long to wait for the driver to change states
    """
    to = gevent.Timeout(timeout)
    to.start()
    done = False
    agent_state = None

    try:
        while not done:
            agent_state = self._dsa_client.get_agent_state()
            log.error("Current agent state: %s", agent_state)

            if agent_state == target_agent_state:
                log.debug("Current state match: %s", agent_state)
                done = True

            if not done:
                log.debug("state mismatch, waiting for state to transition.")
                gevent.sleep(1)
    except Timeout:
        log.error("Failed to transition agent state to %s, current state: %s",
                  target_agent_state, agent_state)
        self.fail("Failed to transition state.")
    finally:
        to.cancel()
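Here gevent.Timeout bounds the polling loop by raising inside the blocked greenlet once the deadline passes. A plain-threaded approximation using a wall-clock deadline instead of gevent (the get_state callable is an illustrative stand-in for the agent client):

import time
import logging

log = logging.getLogger(__name__)

def wait_for_state(get_state, target_state, timeout=10):
    # Poll until the state matches or the wall-clock deadline expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = get_state()
        if state == target_state:
            log.debug("Current state match: %s", state)
            return True
        time.sleep(1)
    log.error("Failed to transition to %s, current state: %s",
              target_state, get_state())
    return False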
Example 13: _get_dsa_client
def _get_dsa_client(self, instrument_device, dsa_instance):
    """
    Launch the agent and return a client
    """
    fake_process = FakeProcess()
    fake_process.container = self.container

    clients = DataAcquisitionManagementServiceDependentClients(fake_process)
    config_builder = ExternalDatasetAgentConfigurationBuilder(clients)

    try:
        config_builder.set_agent_instance_object(dsa_instance)
        self.agent_config = config_builder.prepare()
        log.trace("Using dataset agent configuration: %s", pprint.pformat(self.agent_config))
    except Exception as e:
        log.error('failed to launch: %s', e, exc_info=True)
        raise ServerError('failed to launch')

    dispatcher = ProcessDispatcherServiceClient()
    launcher = AgentLauncher(dispatcher)

    log.debug("Launching agent process!")
    process_id = launcher.launch(self.agent_config, config_builder._get_process_definition()._id)
    if not process_id:
        raise ServerError("Launched external dataset agent instance but no process_id")
    config_builder.record_launch_parameters(self.agent_config)

    launcher.await_launch(10.0)
    return ResourceAgentClient(instrument_device._id, process=FakeProcess())
Example 14: find_instrument_agents
def find_instrument_agents(requester=None):
    instrument_agent_find_request = {
        "serviceRequest": {
            "serviceName": "instrument_management",
            "serviceOp": "find_instrument_agents",
            "expiry": 0,
            "params": {}
        }
    }

    if requester is not None:
        instrument_agent_find_request["serviceRequest"]["requester"] = requester

    response = gateway_request('instrument_management/find_instrument_agents',
                               simplejson.dumps(instrument_agent_find_request))

    if GATEWAY_ERROR in response['data']:
        log.error(response['data'][GATEWAY_ERROR][GATEWAY_ERROR_MESSAGE])
        return response['data'][GATEWAY_ERROR][GATEWAY_ERROR_MESSAGE]

    response_data = response['data'][GATEWAY_RESPONSE]

    log.info('Number of Instrument Agent objects: %s', len(response_data))
    for res in response_data:
        log.debug(res)

    return response_data
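The gateway response is a nested dict, and the error branch both logs the message via log.error and returns it to the caller. A sketch of that inspect-then-log shape, with illustrative key names standing in for the GATEWAY_* constants (their real values are not shown in this example):

import logging

log = logging.getLogger(__name__)

def unwrap_gateway_response(response):
    # 'GatewayError' / 'Message' / 'GatewayResponse' are illustrative keys,
    # not necessarily the values of the real GATEWAY_* constants.
    data = response['data']
    if 'GatewayError' in data:
        log.error(data['GatewayError']['Message'])
        return data['GatewayError']['Message']
    return data['GatewayResponse']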
Example 15: check_lifecycle_policy
def check_lifecycle_policy(self, process, message, headers):
    try:
        gov_values = GovernanceHeaderValues(headers=headers, process=process)
        resource_id = gov_values.resource_id
    except Inconsistent as ex:
        log.error("unable to retrieve governance header")
        return False, ex.message