This page collects typical usage examples of the Python function pyon.util.log.log.warn. If you are unsure what warn does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 15 code examples of the warn function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
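Before the examples, here is a minimal sketch (not taken from the examples below) of the common import and call pattern. It assumes a pyon environment where container logging is already configured; the message arguments are illustrative.

from pyon.util.log import log

# Lazy formatting: arguments are interpolated only if the WARN level is enabled.
log.warn("Process %s returned unexpected status %s", "my_proc", 42)

# Eager formatting (also common in the examples): the string is built up front.
log.warn("Process %s returned unexpected status %s" % ("my_proc", 42))

Both styles appear in the examples below; the lazy form avoids the cost of string formatting when the message is filtered out.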
Example 1: process_csv
def process_csv(self, fuc):
    # CSV file open here
    csv_filename = fuc.get('path', None)
    if csv_filename is None:
        raise BadRequest("uploaded file has no path")

    # keep track of the number of calibrations we actually process
    nupdates = 0
    updates = {}  # keys are reference_designators, used to update object store after parsing CSV

    with open(csv_filename, 'rb') as csvfile:
        # eliminate blank lines
        csvfile = (row for row in csvfile if len(row.strip()) > 0)
        # eliminate commented lines
        csvfile = (row for row in csvfile if not row.startswith('#'))
        # open CSV reader
        csv_reader = csv.reader(csvfile, delimiter=',')
        # iterate the rows returned by csv.reader
        for row in csv_reader:
            if len(row) != 6:
                log.warn("invalid calibration line %s" % ','.join(row))
                continue
            try:
                ipn = row[0]           # instrument_property_number
                name = row[1]          # calibration_name
                value = float(row[2])  # calibration_value
                units = row[3]
                description = row[4]
                start_date = row[5]    # start_date TODO date object?
                d = {
                    'value': value,
                    'units': units,
                    'description': description,
                    'start_date': start_date
                }
            except ValueError:
                continue  # TODO error message? or just skip?
            # group the calibration by ipn, then by calibration name
            if ipn not in updates:
                updates[ipn] = {}  # initialize empty dict
            if name not in updates[ipn]:
                updates[ipn][name] = []  # will be a list of dicts
            updates[ipn][name].append(d)
            nupdates = nupdates + 1

    # insert the updates into object store
    self.update_object_store(updates)

    # update FileUploadContext object (change status to complete)
    fuc['status'] = 'UploadCalibrationProcessing process complete - %d updates added to object store' % nupdates
    self.object_store.update_doc(fuc)

    # remove uploaded file
    try:
        os.remove(csv_filename)
    except OSError:
        pass  # TODO take action to get this removed
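The row handling above follows a warn-and-skip pattern: malformed lines are logged via log.warn and dropped rather than aborting the whole upload. Below is a standalone sketch of the same pattern using only the standard library; the path and field count are illustrative.

import csv
import logging

log = logging.getLogger(__name__)

def load_rows(path, expected_fields=6):
    rows = []
    with open(path) as f:
        # filter blank and commented lines, as process_csv does
        lines = (ln for ln in f if ln.strip() and not ln.startswith('#'))
        for row in csv.reader(lines):
            if len(row) != expected_fields:
                log.warning("invalid calibration line %s", ','.join(row))
                continue  # skip the bad row; keep processing the rest
            rows.append(row)
    return rows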
Example 2: _sync_call
def _sync_call(self, func, cb_arg, *args, **kwargs):
    """
    Functionally similar to the generic blocking_cb but with error support that's Channel specific.
    """
    ar = AsyncResult()

    def cb(*args, **kwargs):
        ret = list(args)
        if len(kwargs):
            ret.append(kwargs)
        ar.set(ret)

    eb = lambda ch, *args: ar.set(TransportError("_sync_call could not complete due to an error (%s)" % args))

    kwargs[cb_arg] = cb
    with self._push_close_cb(eb):
        func(*args, **kwargs)
        ret_vals = ar.get(timeout=10)

    if isinstance(ret_vals, TransportError):
        # mark this channel as poison, do not use again!
        # don't test for type here, we don't want to have to import PyonSelectConnection
        if hasattr(self._client.transport, 'connection') and hasattr(self._client.transport.connection, 'mark_bad_channel'):
            self._client.transport.connection.mark_bad_channel(self._client.channel_number)
        else:
            log.warn("Could not mark channel # (%s) as bad, Pika could be corrupt", self._client.channel_number)
        raise ret_vals

    if len(ret_vals) == 0:
        return None
    elif len(ret_vals) == 1:
        return ret_vals[0]

    return tuple(ret_vals)
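The core trick in _sync_call is turning a callback-style API into a blocking call via gevent's AsyncResult. Here is a reduced sketch of just that conversion, assuming gevent is available; async_func is a hypothetical function that takes a callback keyword.

from gevent.event import AsyncResult

def call_blocking(async_func, timeout=10):
    ar = AsyncResult()
    async_func(callback=ar.set)     # the callback delivers the result
    return ar.get(timeout=timeout)  # block this greenlet until set() or timeout

If the error path fires first (the eb lambda above), the same AsyncResult carries an exception object instead, which the caller inspects and raises.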
Example 3: _spawn_agent_process
def _spawn_agent_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as agent process.
    Attach to service pid.
    """
    service_instance = self._create_service_instance(process_id, name, module, cls, config)
    if not isinstance(service_instance, ResourceAgent):
        raise ContainerConfigError("Agent process must extend ResourceAgent")

    self._service_init(service_instance)
    self._set_service_endpoint(service_instance, service_instance.id)
    self._service_start(service_instance)

    # Directory registration
    caps = service_instance.get_capabilities()
    self.container.directory.register("/Agents", service_instance.id,
                                      **dict(name=service_instance._proc_name,
                                             container=service_instance.container.id,
                                             resource_id=service_instance.resource_id,
                                             agent_id=service_instance.agent_id,
                                             def_id=service_instance.agent_def_id,
                                             capabilities=caps))

    if not service_instance.resource_id:
        log.warn("Agent process id=%s does not define resource_id!!" % service_instance.id)

    return service_instance
Example 4: serve_forever
def serve_forever(self):
    """ Run the container until killed. """
    log.debug("In Container.serve_forever")

    if not self.proc_manager.proc_sup.running:
        self.start()

    # serve forever short-circuits if immediate is on and children len is ok
    num_procs = len(self.proc_manager.proc_sup.children)
    immediate = CFG.system.get('immediate', False)
    if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent
        # print a warning just in case
        if immediate and num_procs != 1:
            log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

        try:
            # This just waits in this Greenlet for all child processes to complete,
            # which is triggered somewhere else.
            self.proc_manager.proc_sup.join_children()
        except (KeyboardInterrupt, SystemExit) as ex:
            log.info('Received a kill signal, shutting down the container.')
            if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                self.gl_parent_watch.kill()
        except:
            log.exception('Unhandled error! Forcing container shutdown')
    else:
        log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

    self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
Example 5: update_service_access_policy
def update_service_access_policy(self, service_name, service_op='', delete_policy=False):
    if self.policy_decision_point_manager is not None:
        try:
            # First update any access policy rules
            rules = self.policy_client.get_active_service_access_policy_rules(service_name, self._container_org_name)
            self.policy_decision_point_manager.load_service_policy_rules(service_name, rules)
        except Exception, e:
            # If the resource does not exist, just ignore it - but log a warning.
            log.warn("The service %s is not found or there was an error applying access policy: %s" % (service_name, e.message))

        # Next update any precondition policies
        try:
            proc = self.container.proc_manager.get_a_local_process(service_name)
            if proc is not None:
                op_preconditions = self.policy_client.get_active_process_operation_preconditions(service_name, service_op, self._container_org_name)
                if op_preconditions:
                    for op in op_preconditions:
                        for pre in op.preconditions:
                            self.unregister_process_operation_precondition(proc, op.op, pre)
                            if not delete_policy:
                                self.register_process_operation_precondition(proc, op.op, pre)
                else:
                    # Unregister all...just in case
                    self.unregister_all_process_operation_precondition(proc, service_op)
        except Exception, e:
            # If the resource does not exist, just ignore it - but log a warning.
            log.warn("The process %s is not found for op %s or there was an error applying access policy: %s" % (service_name, service_op, e.message))
Example 6: target
def target(self, *args, **kwargs):
    """
    Control entrypoint. Sets up the base properties for this process (mainly a listener).
    """
    if self.name:
        threading.current_thread().name = "%s-target" % self.name

    # start time
    self._start_time = int(get_ion_ts())

    # spawn control flow loop
    self._ctrl_thread = self.thread_manager.spawn(self._control_flow)

    # wait on control flow loop, heartbeating as appropriate
    while not self._ctrl_thread.ev_exit.wait(timeout=self._heartbeat_secs):
        hbst = self.heartbeat()
        if not all(hbst):
            log.warn("Heartbeat status for process %s returned %s", self, hbst)
            if self._heartbeat_stack is not None:
                stack_out = "".join(traceback.format_list(self._heartbeat_stack))
            else:
                stack_out = "N/A"
            #raise PyonHeartbeatError("Heartbeat failed: %s, stacktrace:\n%s" % (hbst, stack_out))
            log.warn("Heartbeat failed: %s, stacktrace:\n%s", hbst, stack_out)

    # this is almost a no-op as we don't fall out of the above loop without
    # exiting the ctrl_thread, but having this line here makes testing much easier.
    self._ctrl_thread.join()
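The loop above doubles as a watchdog: ev_exit.wait(timeout=...) sleeps between heartbeats and returns True as soon as the control thread exits. A simplified sketch of the same idea with a plain threading.Event; the interval and health check are illustrative.

import logging
import threading

log = logging.getLogger(__name__)

def run_with_heartbeat(exit_event, check_health, interval=5.0):
    # wait() returns False on timeout (time to heartbeat) and True once exit is signaled
    while not exit_event.wait(timeout=interval):
        status = check_health()
        if not all(status):
            log.warning("Heartbeat failed: %s", status)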
Example 7: _spawned_proc_failed
def _spawned_proc_failed(self, gproc):
    log.error("ProcManager._spawned_proc_failed: %s, %s", gproc, gproc.exception)

    # for now - don't worry about the mapping, if we get a failure, just kill the container.
    # leave the mapping in place for potential expansion later.

    # # look it up in mapping
    # if not gproc in self._spawned_proc_to_process:
    #     log.warn("No record of gproc %s in our map (%s)", gproc, self._spawned_proc_to_process)
    #     return
    #
    prc = self._spawned_proc_to_process.get(gproc, None)
    #
    # # make sure prc is in our list
    # if not prc in self.procs.values():
    #     log.warn("prc %s not found in procs list", prc)
    #     return

    # stop the rest of the process
    if prc is not None:
        try:
            self.terminate_process(prc.id, False)
        except Exception as e:
            log.warn("Problem while stopping rest of failed process %s: %s", prc, e)
        finally:
            self._call_proc_state_changed(prc, ProcessStateEnum.FAILED)
    else:
        log.warn("No ION process found for failed proc manager child: %s", gproc)

    #self.container.fail_fast("Container process (%s) failed: %s" % (svc, gproc.exception))

    # Stop the container if this was the last process
    if not self.procs and CFG.get_safe("container.processes.exit_once_empty", False):
        self.container.fail_fast("Terminating container after last process (%s) failed: %s" % (gproc, gproc.exception))
Example 8: delete_doc
def delete_doc(self, doc, datastore_name=""):
    if not datastore_name:
        datastore_name = self.datastore_name
    try:
        datastore_dict = self.root[datastore_name]
    except KeyError:
        raise BadRequest('Data store ' + datastore_name + ' does not exist.')

    if type(doc) is str:
        object_id = doc
    else:
        object_id = doc["_id"]

    log.info('Deleting object %s/%s' % (datastore_name, object_id))
    if object_id in datastore_dict.keys():
        if self._is_in_association(object_id, datastore_name):
            obj = self.read(object_id, "", datastore_name)
            log.warn("XXXXXXX Attempt to delete object %s that still has associations" % str(obj))
            # raise BadRequest("Object cannot be deleted until associations are broken")

        # Find all version dicts and delete them
        for key in datastore_dict.keys():
            if key.find(object_id + '_version_') == 0:
                del datastore_dict[key]
        # Delete the HEAD dict
        del datastore_dict[object_id]
        # Delete the version counter dict
        del datastore_dict['__' + object_id + '_version_counter']
    else:
        raise NotFound('Object with id ' + object_id + ' does not exist.')
    log.info('Delete result: True')
Example 9: _cleanup_outdated_entries
def _cleanup_outdated_entries(self, dir_entries, common="key"):
    """
    This function takes all DirEntry objects from the list and removes all but the most recent one
    by ts_updated timestamp. It returns the most recent DirEntry and removes the others by
    direct datastore operations. If there are multiple entries with the most recent timestamp,
    the first encountered is kept and the others non-deterministically removed.
    Note: This operation can be called for DirEntries without common keys, e.g. for all
    entries registering an agent for a device.
    """
    if not dir_entries:
        return

    newest_entry = dir_entries[0]
    try:
        for de in dir_entries:
            if int(de.ts_updated) > int(newest_entry.ts_updated):
                newest_entry = de

        remove_list = [de for de in dir_entries if de is not newest_entry]
        log.info("Attempting to cleanup these directory entries: %s" % remove_list)
        for de in remove_list:
            try:
                self.dir_store.delete(de)
            except Exception as ex:
                log.warn("Removal of outdated %s directory entry failed: %s" % (common, de))
        log.info("Cleanup of %s old %s directory entries succeeded" % (len(remove_list), common))
    except Exception as ex:
        log.warn("Cleanup of multiple directory entries for %s failed: %s" % (common, str(ex)))

    return newest_entry
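The selection step in _cleanup_outdated_entries reduces to a max() by timestamp; like the explicit loop with a strict >, max() keeps the first of several entries sharing the newest timestamp. A minimal sketch with plain dicts standing in for DirEntry objects:

entries = [{'key': 'a', 'ts_updated': '100'},
           {'key': 'a', 'ts_updated': '300'},
           {'key': 'a', 'ts_updated': '200'}]

newest = max(entries, key=lambda e: int(e['ts_updated']))
outdated = [e for e in entries if e is not newest]  # candidates for deletion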
Example 10: _execute
def _execute(self, cprefix, command):
    if not command:
        raise iex.BadRequest("execute argument 'command' not present")
    if not command.command:
        raise iex.BadRequest("command not set")

    cmd_res = IonObject("AgentCommandResult", command_id=command.command_id, command=command.command)
    cmd_func = getattr(self, cprefix + str(command.command), None)
    if cmd_func:
        cmd_res.ts_execute = get_ion_ts()
        try:
            res = cmd_func(*command.args, **command.kwargs)
            cmd_res.status = 0
            cmd_res.result = res
        except iex.IonException as ex:
            # TODO: Distinguish application vs. uncaught exception
            cmd_res.status = getattr(ex, 'status_code', -1)
            cmd_res.result = str(ex)
            log.warn("Agent command %s failed with trace=%s" % (command.command, traceback.format_exc()))
    else:
        log.info("Agent command not supported: %s" % (command.command))
        ex = iex.NotFound("Command not supported: %s" % command.command)
        cmd_res.status = iex.NotFound.status_code
        cmd_res.result = str(ex)

    sub_type = "%s.%s" % (command.command, cmd_res.status)
    post_event = self._event_publisher._create_event(event_type=self.COMMAND_EVENT_TYPE,
                                                     origin=self.resource_id, origin_type=self.ORIGIN_TYPE,
                                                     sub_type=sub_type, command=command, result=cmd_res)
    post_event = self._post_execute_event_hook(post_event)
    success = self._event_publisher._publish_event(post_event, origin=post_event.origin)

    return cmd_res
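_execute resolves command handlers by name with getattr(self, cprefix + command, None), so agents add commands simply by defining methods with the right prefix. A minimal sketch of that dispatch style; the class name and prefix here are illustrative.

class ToyAgent(object):
    def acmd_ping(self):
        return "pong"

    def execute(self, name, *args):
        func = getattr(self, "acmd_" + name, None)  # resolve handler by prefix
        if func is None:
            raise ValueError("Command not supported: %s" % name)
        return func(*args)

assert ToyAgent().execute("ping") == "pong"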
Example 11: service_policy_event_callback
def service_policy_event_callback(self, *args, **kwargs):
    service_policy_event = args[0]
    log.debug('Service related policy event received: %s', str(service_policy_event.__dict__))

    policy_id = service_policy_event.origin
    service_name = service_policy_event.service_name
    service_op = service_policy_event.op

    if service_name:
        if self.container.proc_manager.is_local_service_process(service_name):
            self.update_service_access_policy(service_name, service_op)
        elif self.container.proc_manager.is_local_agent_process(service_name):
            self.update_service_access_policy(service_name, service_op)
    else:
        if self.policy_decision_point_manager is not None:
            try:
                rules = self.policy_client.get_active_service_access_policy_rules('', self._container_org_name)
                self.policy_decision_point_manager.load_common_service_policy_rules(rules)

                # Reload all policies for existing services
                for service_name in self.policy_decision_point_manager.get_list_service_policies():
                    if self.container.proc_manager.is_local_service_process(service_name):
                        self.update_service_access_policy(service_name)
            except Exception, e:
                # If the resource does not exist, just ignore it - but log a warning.
                log.warn("There was an error applying access policy: %s" % e.message)
Example 12: start
def start(self):
    log.debug("GovernanceController starting ...")

    self._CFG = CFG
    self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)
    if not self.enabled:
        log.warn("GovernanceInterceptor disabled by configuration")
    self.policy_event_subscriber = None

    # Containers default to not Org Boundary and ION Root Org
    self._is_container_org_boundary = CFG.get_safe('container.org_boundary', False)
    self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
    self._container_org_id = None
    self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')
    self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

    self.system_actor_id = None
    self.system_actor_user_header = None

    self.rr_client = ResourceRegistryServiceProcessClient(process=self.container)
    self.policy_client = PolicyManagementServiceProcessClient(process=self.container)

    if self.enabled:
        config = CFG.get_safe('interceptor.interceptors.governance.config')
        self.initialize_from_config(config)

        self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
        self.policy_event_subscriber.start()

        self._policy_snapshot = self._get_policy_snapshot()
        self._log_policy_update("start_governance_ctrl", message="Container start")
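CFG.get_safe resolves a dotted configuration path and returns a default instead of raising when any segment is missing, which is why start() can probe optional settings freely. A rough dict-based stand-in, for illustration only (pyon's actual implementation may differ):

def get_safe(cfg, dotted_key, default=None):
    cur = cfg
    for part in dotted_key.split('.'):
        try:
            cur = cur[part]
        except (KeyError, TypeError):
            return default
    return cur

cfg = {'container': {'org_name': 'MyOrg'}}
assert get_safe(cfg, 'container.org_name') == 'MyOrg'
assert get_safe(cfg, 'system.root_org', 'ION') == 'ION'  # falls back to default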
Example 13: on_channel_close
def on_channel_close(self, code, text):
    """
    Callback for when the Pika channel is closed.
    """
    logmeth = log.debug
    if not (code == 0 or code == 200):
        logmeth = log.error
    logmeth("BaseChannel.on_channel_close\n\tchannel number: %s\n\tcode: %d\n\ttext: %s",
            self.get_channel_id(), code, text)

    # remove amq_chan so we don't try to use it again
    # (all?) calls are protected via _ensure_amq_chan, which raises a ChannelError if you try to do anything with it
    self._amq_chan = None

    # make callback if it exists!
    if not (code == 0 or code == 200) and self._closed_error_callback:
        # run in try block because this can shutter the entire connection
        try:
            self._closed_error_callback(self, code, text)
        except Exception, e:
            log.warn("Closed error callback caught an exception: %s", str(e))
Example 14: stop
def stop(self):
    log.debug("ProcManager stopping ...")

    from pyon.datastore.couchdb.couchdb_datastore import CouchDB_DataStore
    stats1 = CouchDB_DataStore._stats.get_stats()

    # Call quit on procs to give them the ability to clean up
    # @TODO terminate_process is not gl-safe
    # gls = map(lambda k: spawn(self.terminate_process, k), self.procs.keys())
    # join(gls)
    procs_list = sorted(self.procs.values(), key=lambda proc: proc._proc_start_time, reverse=True)
    for proc in procs_list:
        self.terminate_process(proc.id)

    # TODO: Have a choice of shutdown behaviors for waiting on children, timeouts, etc
    self.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    if self.procs:
        log.warn("ProcManager procs not empty: %s", self.procs)
    if self.procs_by_name:
        log.warn("ProcManager procs_by_name not empty: %s", self.procs_by_name)

    # Remove resource registration
    self.container.resource_registry.delete(self.cc_id, del_associations=True)
    # TODO: Check associations to processes

    stats2 = CouchDB_DataStore._stats.get_stats()
    stats3 = CouchDB_DataStore._stats.diff_stats(stats2, stats1)
    log.debug("Datastore stats difference during stop(): %s", stats3)

    log.debug("ProcManager stopped, OK.")
Example 15: deactivate_data_process
def deactivate_data_process(self, data_process_id=""):
    data_process_obj = self.read_data_process(data_process_id)

    if not data_process_obj.input_subscription_id:
        log.warn("data process '%s' has no subscription id to deactivate", data_process_id)
        return

    subscription_obj = self.clients.pubsub_management.read_subscription(data_process_obj.input_subscription_id)
    if subscription_obj.activated:
        # update the producer context with the deactivation time
        # todo: update the setting of this context with the return vals from process_dispatcher:schedule_process after convert
        producer_obj = self._get_process_producer(data_process_id)
        producertype = type(producer_obj).__name__
        if producer_obj.producer_context.type_ == OT.DataProcessProducerContext:
            log.debug("data_process '%s' (producer '%s'): deactivation_time = %s",
                      data_process_id, producer_obj._id, str(IonTime().to_string()))
            producer_obj.producer_context.deactivation_time = IonTime().to_string()
            self.clients.resource_registry.update(producer_obj)

        subscription_id = data_process_obj.input_subscription_id
        log.debug("Deactivating subscription '%s'", subscription_id)
        self.clients.pubsub_management.deactivate_subscription(subscription_id=subscription_id)