This article collects typical usage examples of the info function from Python's pyon.util.log.log. If you are wondering what info does, how to call it, or what it looks like in real code, the examples below should help.
The following 15 code examples of the info function are shown, sorted by popularity.
Example 1: time_it

@contextmanager   # requires: from contextlib import contextmanager
def time_it(msg="step"):
    t1 = time.time()
    try:
        yield
    finally:
        t2 = time.time()
        log.info("Time %s: %1.7f", msg, (t2 - t1))
Example 2: _force_clean

@classmethod
def _force_clean(cls, recreate=False, initial=False):
    # Database resources
    from pyon.core.bootstrap import get_sys_name, CFG
    from pyon.datastore.datastore_common import DatastoreFactory
    datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE,
                                               scope=get_sys_name())
    if initial:
        datastore._init_database(datastore.database)

    dbs = datastore.list_datastores()
    clean_prefix = '%s_' % get_sys_name().lower()
    things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]
    try:
        for thing in things_to_clean:
            datastore.delete_datastore(datastore_name=thing)
            if recreate:
                datastore.create_datastore(datastore_name=thing)
    finally:
        datastore.close()

    # Broker resources
    from putil.rabbitmq.rabbit_util import RabbitManagementUtil
    rabbit_util = RabbitManagementUtil(CFG, sysname=get_sys_name())
    deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
    log.info("Deleted %s exchanges, %s queues" % (len(deleted_exchanges), len(deleted_queues)))

    # File system
    from pyon.util.file_sys import FileSystem
    FileSystem._clean(CFG)
Example 3: terminate_process

def terminate_process(self, process_id, do_notifications=True):
    """
    Terminates a process and all its resources. Termination is graceful with timeout.

    @param process_id        The id of the process to terminate. Should exist in the
                             container's list of processes, or this will raise.
    @param do_notifications  If True, emits process state changes for TERMINATING and
                             TERMINATED. If False, suppresses any state changes. Used when
                             the process has already reached EXITED or FAILED.
    """
    process_instance = self.procs.get(process_id, None)
    if not process_instance:
        raise BadRequest("Cannot terminate. Process id='%s' unknown on container id='%s'" % (
            process_id, self.container.id))

    log.info("ProcManager.terminate_process: %s -> pid=%s",
             process_instance._proc_name, process_id)

    if do_notifications:
        self._call_proc_state_changed(process_instance, ProcessStateEnum.TERMINATING)

    self._process_quit(process_instance)
    self._unregister_process(process_id, process_instance)

    if do_notifications:
        self._call_proc_state_changed(process_instance, ProcessStateEnum.TERMINATED)
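The do_notifications flag is a notify-around-teardown pattern: one teardown path serves both the normal case (publish TERMINATING/TERMINATED events) and the case where the process already died (stay silent). A framework-free sketch of the same shape, with illustrative names rather than pyon's:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

class DummyProc(object):
    def stop(self):
        pass

def terminate(procs, process_id, do_notifications=True):
    proc = procs.pop(process_id, None)
    if proc is None:
        raise KeyError("unknown process id %r" % process_id)
    if do_notifications:
        log.info("process %s -> TERMINATING", process_id)
    proc.stop()   # stands in for _process_quit() + _unregister_process()
    if do_notifications:
        log.info("process %s -> TERMINATED", process_id)

terminate({"p1": DummyProc()}, "p1")                          # logs both transitions
terminate({"p2": DummyProc()}, "p2", do_notifications=False)  # silent teardown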
Example 4: update_data_process_inputs
def update_data_process_inputs(self, data_process_id="", in_stream_ids=None):
    # TODO: input stream validation
    log.debug("Updating inputs to data process '%s'", data_process_id)
    data_process_obj = self.clients.resource_registry.read(data_process_id)
    subscription_id = data_process_obj.input_subscription_id
    was_active = False
    if subscription_id:
        # get rid of all the current streams
        try:
            log.debug("Deactivating subscription '%s'", subscription_id)
            self.clients.pubsub_management.deactivate_subscription(subscription_id)
            was_active = True
        except BadRequest:
            log.info('Subscription was not active')
        self.clients.pubsub_management.delete_subscription(subscription_id)

    new_subscription_id = self.clients.pubsub_management.create_subscription(
        data_process_obj.name, stream_ids=in_stream_ids)
    data_process_obj.input_subscription_id = new_subscription_id
    self.clients.resource_registry.update(data_process_obj)

    if was_active:
        log.debug("Activating subscription '%s'", new_subscription_id)
        self.clients.pubsub_management.activate_subscription(new_subscription_id)
Example 5: on_start
def on_start(self):
    '''
    Starts the process
    '''
    log.info('Replay Process Started')
    super(ReplayProcess, self).on_start()
    dsm_cli = DatasetManagementServiceProcessClient(process=self)
    pubsub = PubsubManagementServiceProcessClient(process=self)

    self.dataset_id = self.CFG.get_safe('process.dataset_id', None)
    self.delivery_format = self.CFG.get_safe('process.delivery_format', {})
    self.start_time = self.CFG.get_safe('process.query.start_time', None)
    self.end_time = self.CFG.get_safe('process.query.end_time', None)
    self.stride_time = self.CFG.get_safe('process.query.stride_time', None)
    self.parameters = self.CFG.get_safe('process.query.parameters', None)
    self.publish_limit = self.CFG.get_safe('process.query.publish_limit', 10)
    self.tdoa = self.CFG.get_safe('process.query.tdoa', None)
    self.stream_id = self.CFG.get_safe('process.publish_streams.output', '')
    self.stream_def = pubsub.read_stream_definition(stream_id=self.stream_id)
    self.stream_def_id = self.stream_def._id

    self.publishing.clear()
    self.play.set()
    self.end.clear()

    if self.dataset_id is None:
        raise BadRequest('dataset_id not specified')

    self.dataset = dsm_cli.read_dataset(self.dataset_id)
    self.pubsub = PubsubManagementServiceProcessClient(process=self)
Example 6: on_initial_bootstrap
def on_initial_bootstrap(self, process, config, **kwargs):
    if os.environ.get('PYCC_MODE'):
        # This environment is an ion integration test
        log.info('PYCC_MODE: skipping qc_post_processor launch')
        return

    if self.process_exists(process, 'qc_post_processor'):
        # Short circuit the bootstrap to make sure no more than one is ever started
        return

    self.scheduler_service = SchedulerServiceProcessClient(process=process)
    self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

    self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

    interval_key = uuid4().hex  # unique identifier for this process
    config = DotDict()          # note: rebinds the 'config' argument with the spawn configuration
    config.process.interval_key = interval_key

    process_definition = ProcessDefinition(
        name='qc_post_processor',
        executable={'module': 'ion.processes.data.transforms.qc_post_processing',
                    'class': 'QCPostProcessing'})
    process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

    process_id = self.process_dispatcher.create_process(process_definition_id)
    self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id,
                                             configuration=config)

    timer_id = self.scheduler_service.create_interval_timer(
        start_time=str(time.time()),
        end_time='-1',                      # run forever
        interval=3600 * self.run_interval,  # run_interval is in hours
        event_origin=interval_key)
Example 7: delete_doc
def delete_doc(self, doc, datastore_name=""):
    if not datastore_name:
        datastore_name = self.datastore_name
    try:
        db = self.server[datastore_name]
    except ValueError:
        raise BadRequest("Data store name %s is invalid" % datastore_name)

    if type(doc) is str:
        log.info('Deleting object %s/%s' % (datastore_name, doc))
        if self._is_in_association(doc, datastore_name):
            obj = self.read(doc, datastore_name)
            log.warn("XXXXXXX Attempt to delete object %s that still has associations" % str(obj))
            # raise BadRequest("Object cannot be deleted until associations are broken")
        try:
            del db[doc]
        except ResourceNotFound:
            raise NotFound('Object with id %s does not exist.' % str(doc))
    else:
        log.info('Deleting object %s/%s' % (datastore_name, doc["_id"]))
        if self._is_in_association(doc["_id"], datastore_name):
            log.warn("XXXXXXX Attempt to delete object %s that still has associations" % str(doc))
            # raise BadRequest("Object cannot be deleted until associations are broken")
        try:
            res = db.delete(doc)
        except ResourceNotFound:
            raise NotFound('Object with id %s does not exist.' % str(doc["_id"]))
        log.debug('Delete result: %s' % str(res))
Example 8: check_localrange
def check_localrange(self):
    log.info('check_localrange')
    self.new_rdt()

    t = np.array([3580144703.7555027, 3580144704.7555027, 3580144705.7555027,
                  3580144706.7555027, 3580144707.7555027, 3580144708.7555027,
                  3580144709.7555027, 3580144710.7555027, 3580144711.7555027,
                  3580144712.7555027])
    pressure = np.random.rand(10) * 2 + 33.0
    t_v = ntp_to_month(t)
    dat = t_v + pressure + np.arange(16, 26)

    def lim1(p, m):
        return p + m + 10
    def lim2(p, m):
        return p + m + 20

    pressure_grid, month_grid = np.meshgrid(np.arange(0, 150, 10), np.arange(11))
    points = np.column_stack([pressure_grid.flatten(), month_grid.flatten()])
    datlim_0 = lim1(points[:, 0], points[:, 1])
    datlim_1 = lim2(points[:, 0], points[:, 1])
    datlim = np.column_stack([datlim_0, datlim_1])
    datlimz = points

    self.svm.stored_value_cas('lrt_QCTEST_TEMPWAT',
                              {'datlim': datlim.tolist(),
                               'datlimz': datlimz.tolist(),
                               'dims': ['pressure', 'month']})

    self.rdt['time'] = t
    self.rdt['temp'] = dat
    self.rdt['pressure'] = pressure

    self.rdt.fetch_lookup_values()

    np.testing.assert_array_equal(self.rdt['tempwat_loclrng_qc'], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
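The expected-value table in this test is built by evaluating limit functions over every point of a (pressure, month) grid. The meshgrid/column_stack idiom it relies on is easy to verify in isolation; a minimal sketch, assuming only numpy:

import numpy as np

# 15 pressure bins x 11 months -> 165 (pressure, month) pairs
pressure_grid, month_grid = np.meshgrid(np.arange(0, 150, 10), np.arange(11))
points = np.column_stack([pressure_grid.flatten(), month_grid.flatten()])
print(points.shape)   # (165, 2)

# lower/upper limits evaluated at every grid point, stored column-wise
datlim = np.column_stack([points[:, 0] + points[:, 1] + 10,
                          points[:, 0] + points[:, 1] + 20])
print(datlim.shape)   # (165, 2)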
Example 9: _execute
def _execute(self, cprefix, command):
    if not command:
        raise iex.BadRequest("execute argument 'command' not present")
    if not command.command:
        raise iex.BadRequest("command not set")

    cmd_res = IonObject("AgentCommandResult", command_id=command.command_id,
                        command=command.command)
    cmd_func = getattr(self, cprefix + str(command.command), None)
    if cmd_func:
        cmd_res.ts_execute = get_ion_ts()
        try:
            res = cmd_func(*command.args, **command.kwargs)
            cmd_res.status = 0
            cmd_res.result = res
        except Exception as ex:
            # TODO: Distinguish application vs. uncaught exception
            cmd_res.status = getattr(ex, 'status_code', -1)
            cmd_res.result = str(ex)
            log.info("Agent function failed with ex=%s msg=%s" % (type(ex), str(ex)))
    else:
        log.info("Agent command not supported: %s" % command.command)
        ex = iex.NotFound("Command not supported: %s" % command.command)
        cmd_res.status = iex.NotFound.status_code
        cmd_res.result = str(ex)

    return cmd_res
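The core of _execute is prefix-based dispatch: getattr(self, cprefix + command_name, None) maps a wire-level command string onto a method, with a None default instead of an AttributeError. A self-contained sketch of that technique (class and method names here are illustrative, not pyon's):

class Agent(object):
    def acmd_ping(self):
        return "pong"

    def execute(self, command_name, *args, **kwargs):
        cmd_func = getattr(self, "acmd_" + command_name, None)
        if cmd_func is None:
            return {"status": 404, "result": "Command not supported: %s" % command_name}
        try:
            return {"status": 0, "result": cmd_func(*args, **kwargs)}
        except Exception as ex:
            # mirror the status_code fallback used above
            return {"status": getattr(ex, "status_code", -1), "result": str(ex)}

print(Agent().execute("ping"))   # {'status': 0, 'result': 'pong'}
print(Agent().execute("nope"))   # {'status': 404, 'result': 'Command not supported: nope'}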
Example 10: start_listeners
def start_listeners(self):
    """
    Starts all listeners in managed greenlets.

    This must be called after starting this IonProcess. Currently, the Container's ProcManager
    will handle this for you, but if using an IonProcess manually, you must remember to call
    this method or no attached listeners will run.
    """
    try:
        # disable normal error reporting; this method should only be called from startup
        self.thread_manager._failure_notify_callback = None

        # spawn all listeners in startup listeners (from initializer, or added later)
        for listener in self._startup_listeners:
            self.add_endpoint(listener)

        with Timeout(10):
            waitall([x.get_ready_event() for x in self.listeners])
    except Timeout:
        # remove failed endpoints before reporting failure above
        for listener, proc in self._listener_map.iteritems():
            if proc.proc.dead:
                log.info("removed dead listener: %s", listener)
                self.listeners.remove(listener)
                self.thread_manager.children.remove(proc)
        raise IonProcessError("start_listeners did not complete in expected time")
    finally:
        self.thread_manager._failure_notify_callback = self._child_failed
Example 11: suspend
def suspend(self):
    '''
    Stops the event loop
    '''
    self.event.set()
    self._thread.join(self.timeout)
    log.info("QC Thread Suspended")
Example 12: _load_capabilities
def _load_capabilities(self):
    self._cap_initialized = []  # List of capability constants initialized in container
    self._capabilities = []     # List of capability constants active in container
    self._cap_instances = {}    # Dict mapping capability->manager instance

    self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

    profile_filename = CFG.get_safe("container.profile", "development")
    if not profile_filename.endswith(".yml"):
        profile_filename = "res/profile/%s.yml" % profile_filename
    log.info("Loading CC capability profile from file: %s", profile_filename)
    profile_cfg = Config([profile_filename]).data
    if not isinstance(profile_cfg, dict) or profile_cfg['type'] != "profile" or "profile" not in profile_cfg:
        raise ContainerError("Container capability profile invalid: %s" % profile_filename)
    self.cap_profile = profile_cfg['profile']

    if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
        dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

    CCAP.clear()
    cap_list = self._cap_definitions.keys()
    CCAP.update(zip(cap_list, cap_list))

    if "config" in self.cap_profile and self.cap_profile['config']:
        log.info("Container CFG was changed based on profile: %s", profile_filename)
Example 13: _setup_session
def _setup_session(self):
    # negotiate with the telnet client to have server echo characters
    response = input = ''

    # set socket to non-blocking
    self.connection_socket.setblocking(0)
    start_time = time.time()

    self._write(self.WILL_ECHO_CMD)
    while True:
        try:
            input = self.connection_socket.recv(100)
        except gevent.socket.error as error:
            if error[0] == errno.EAGAIN:
                gevent.sleep(.1)
            else:
                log.info("TcpServer._setup_session(): exception caught <%s>" % str(error))
                self._exit_handler("lost connection")
                return False
        if len(input) > 0:
            response += input
        if self.DO_ECHO_CMD in response:
            # set socket back to blocking
            self.connection_socket.setblocking(1)
            return True
        elif time.time() - start_time > 5:
            self._exit_handler("session setup timed out")
            self._writeline("session negotiation with telnet client failed, closing connection")
            return False
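The negotiation above hinges on two byte sequences, WILL_ECHO_CMD and DO_ECHO_CMD, whose definitions are not part of this snippet. Per RFC 854/857, server-side echo negotiation sends IAC WILL ECHO and waits for IAC DO ECHO in reply; the constants below are those standard values, offered as an assumption about what TcpServer defines:

# Standard telnet option-negotiation bytes (RFC 854/857); assumed to match
# TcpServer.WILL_ECHO_CMD / DO_ECHO_CMD, which are defined elsewhere.
IAC = '\xff'    # "interpret as command" escape
WILL = '\xfb'   # sender wants to enable an option
DO = '\xfd'     # sender asks the peer to enable an option
ECHO = '\x01'   # option code: echo

WILL_ECHO_CMD = IAC + WILL + ECHO   # server: "I will echo"
DO_ECHO_CMD = IAC + DO + ECHO       # client reply: "please do echo"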
Example 14: memory_usage
def memory_usage(self):
    """
    Get the current memory usage for the current driver process.
    @returns memory usage in KB of the current driver process
    """
    driver_pid = self.getpid()
    if not driver_pid:
        log.warn("no process running")
        return 0

    #ps_process = subprocess.Popen(["ps", "-p", self.getpid(), "-o", "rss,pid"])
    ps_process = subprocess.Popen(["ps", "-o rss,pid", "-p %s" % self.getpid()],
                                  stdout=subprocess.PIPE)
    retcode = ps_process.poll()

    usage = 0
    for line in ps_process.stdout:
        if not line.strip().startswith('RSS'):
            try:
                fields = line.split()
                pid = int(fields[1])
                if pid == driver_pid:
                    usage = int(fields[0])
            except (ValueError, IndexError):
                log.warn("Failed to parse output for memory usage: %s" % line)
                usage = 0

    if usage:
        log.info("process memory usage: %dk" % usage)
    else:
        log.warn("process not running")

    return usage
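The same RSS lookup can be reproduced for the current interpreter using only the standard library. A small Unix-only sketch (it shells out to ps, so it will not work on Windows):

import os
import subprocess

def rss_kb(pid):
    # "ps -o rss= -p PID" prints only the resident set size in KB
    # (the trailing '=' suppresses the header line)
    out = subprocess.Popen(["ps", "-o", "rss=", "-p", str(pid)],
                           stdout=subprocess.PIPE).communicate()[0]
    try:
        return int(out.decode("ascii").strip())
    except ValueError:
        return 0   # pid not running or unparsable output

print(rss_kb(os.getpid()))   # e.g. 14232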
Example 15: _message_received
def _message_received(self, msg, headers):
    """
    Internal _message_received override.

    We need to be able to detect IonExceptions raised in the Interceptor stacks as well as in
    the actual call to the op we're routing into. This override will handle the return value
    being sent to the caller.
    """
    result = None
    try:
        # execute interceptor stack, calls into our message_received
        result, response_headers = ResponseEndpointUnit._message_received(self, msg, headers)
    except IonException as ex:
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        tb_list = traceback.extract_tb(sys.exc_info()[2])
        tb_list = traceback.format_list(tb_list)
        tb_output = "".join(tb_list)

        log.debug("Got error response")
        log.debug("Exception message: %s" % ex)
        log.debug("Traceback:\n%s" % tb_output)
        response_headers = self._create_error_response(ex)

    # REPLIES: propagate protocol, conv-id, conv-seq
    response_headers['protocol'] = headers.get('protocol', '')
    response_headers['conv-id'] = headers.get('conv-id', '')
    response_headers['conv-seq'] = headers.get('conv-seq', 1) + 1

    log.info("MESSAGE SEND [S->D] RPC: %s" % str(msg))
    return self.send(result, response_headers)
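The header bookkeeping at the end is what keeps request/reply conversations correlated: the reply echoes the conversation identity and advances the sequence counter. Isolated as a plain-dict helper, this is a sketch of the convention rather than pyon's endpoint code:

def reply_headers_for(request_headers, response_headers):
    # echo the conversation identity and advance the sequence counter
    response_headers['protocol'] = request_headers.get('protocol', '')
    response_headers['conv-id'] = request_headers.get('conv-id', '')
    response_headers['conv-seq'] = request_headers.get('conv-seq', 1) + 1
    return response_headers

headers = {'protocol': 'rpc', 'conv-id': 'c1', 'conv-seq': 1}
print(reply_headers_for(headers, {}))
# {'protocol': 'rpc', 'conv-id': 'c1', 'conv-seq': 2}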