本文整理汇总了Python中multiprocessing.get_logger函数的典型用法代码示例。如果您正苦于以下问题:Python get_logger函数的具体用法?Python get_logger怎么用?Python get_logger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_logger函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ftp_download
def ftp_download(file_path, is_override, output_directory, uri, user, password,
                 worker_semaphore, inserted_task_event, tasks, log_format, log_level):
    """
    Download a single file from the given FTP server into output_directory.

    On failure the partial file is removed and the filename is re-queued via
    `tasks` (signalled through `inserted_task_event`) so another worker can
    retry it. `worker_semaphore` is always released so the pool slot frees up.

    #>>> ftp_download('/tmp/archives.zip.002', True, 'f:/', '16.60.160.90', 'edwin', 'edwin')
    #>>> os.path.isfile('f:/archives.zip.002')
    #True
    """
    logger = multiprocessing.get_logger()
    path, filename = os.path.split(file_path)
    output_file = os.path.join(output_directory, filename)
    try:
        init_logger(logger, log_level, log_format)
        logger.info('Start downloading %s' % file_path)
        if os.path.isfile(output_file) and not is_override:
            # Nothing to do; original connected to the server before this
            # check and leaked the connection on this early return.
            return
        if os.path.isfile(output_file):
            os.remove(output_file)
        ftp = ftplib.FTP(host=uri, user=user, passwd=password)
        try:
            ftp.cwd(path)
            # Open the target file once for the whole transfer; the original
            # opened (and never closed) a new file handle per data chunk.
            with open(output_file, 'wb') as out_fh:
                ftp.retrbinary('RETR %s' % filename, out_fh.write)
        finally:
            # Close the control connection on success and failure alike.
            ftp.close()
        logger.info('Complete file to %s' % output_file)
    except Exception as e:
        logger.error('Download %s failed, error info %s' % (file_path, e))
        # Drop the partial download and re-queue the task for a retry.
        if os.path.exists(output_file):
            os.remove(output_file)
        tasks.put(filename)
        inserted_task_event.set()
    finally:
        logger.debug('Release lock %s' % id(worker_semaphore))
        worker_semaphore.release()
示例2: main
def main(args):
    """
    Dispatch a CLI invocation to run_filter or run_algo.

    args[0] selects the sub-command ("filter" or "algo"); the remaining
    items are forwarded as positional arguments. Returns the sub-command's
    result, or False when the command is unknown or raises.
    """
    try:
        if args[0] == "filter":
            return run_filter(*args[1:])
        elif args[0] == "algo":
            return run_algo(*args[1:])
    # Catch Exception instead of the original bare `except:`, which would
    # also swallow SystemExit and KeyboardInterrupt.
    except Exception:
        get_logger().error(traceback.format_exc())
    return False
示例3: __init__
def __init__(self):
    """
    Set up the integration-test runner: a worker pool sized to roughly
    4/5 of the available CPUs (at least one), empty run state, then CLI
    processing, and a startup banner on the shared logger.
    """
    # Use floor division: on Python 3, `cpu_count() / 5 * 4` yields a
    # float, which multiprocessing.Pool rejects as a process count.
    proc_count = cpu_count() // 5 * 4
    if proc_count < 1:
        proc_count = 1
    self.pool = multiprocessing.Pool(proc_count, initializer=setup_jenkins_console_logger)
    self.workspace_path = ""
    self.runlist = []
    self.params = {}
    self.process_cli()
    multiprocessing.get_logger().info("\n{0}\nIntegration tests runner started.\n{0}\n".format("*" * 80))
示例4: spawn_test_process
def spawn_test_process(test, flags):
    """Run one test executable with the given flags and capture its output.

    Returns a tuple of (test name, non-empty stdout lines, stderr bytes,
    exit code). The stdout lines are needed e.g. for getting the list of
    tests from an exec file by sending it the --list_tests flag.
    """
    command = "{0} {1}".format(test, flags)
    multiprocessing.get_logger().info(command)
    proc = subprocess.Popen([command], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
    stdout, stderr = proc.communicate()
    nonempty_lines = filter(None, stdout.splitlines())
    return test, nonempty_lines, stderr, proc.returncode
示例5: loop
def loop(self):
    """
    Event loop of a session server process: block until the event is
    signalled, then drain the request queue. A "terminate" request saves
    the dataframe and breaks out so the process can exit.
    """
    while True:
        multiprocessing.get_logger().info("%s server: Waiting for signal" % self.sessionID)
        self.event.wait()
        while not self.queue.empty():
            # Fixed: the original passed sessionID as a second positional
            # argument to print(), which printed a tuple instead of
            # interpolating it into the message.
            print("%s server: Got request signal" % self.sessionID)
            self.event.clear()
            # NOTE(review): this reads from the module-level `queues[x]`
            # while the emptiness check above uses `self.queue` — they look
            # like they should be the same queue; confirm against the
            # enclosing module before changing.
            request = queues[x].get(False)
            if isinstance(request, basestring) and request.lower() == "terminate":
                # terminate signal received => save dataframe and exit event
                # loop => process gets terminated
                self.dataFrameManager.saveDataFrame()
                break
            else:
                self.requestManager.processRequest(request)
示例6: __call__
def __call__(self, *args, **kwargs):
    """Invoke the wrapped callable, logging any traceback before re-raising.

    When multiprocessing's debugging is enabled this makes worker-side
    failures visible in the log; the exception is re-raised unchanged so
    the Pool worker can clean up as usual.
    """
    try:
        outcome = self.__callable(*args, **kwargs)
    except Exception:
        # Record the full traceback for debugging, then propagate the
        # original exception to the Pool worker.
        multiprocessing.get_logger().error(traceback.format_exc())
        raise
    # It was fine, give a normal answer
    return outcome
示例7: setup_logger
def setup_logger(settings):
    """Configure multiprocessing's shared logger from a settings mapping.

    Attaches a timestamped file handler when both 'log_file_dir' and
    'log_file_level' are set, and a colorized stderr handler when
    'log_stderr_level' is set. Returns the configured logger.
    """
    class ColorFilter(logging.Filter):
        # Guarantee every record carries an `xcolor` attribute so the
        # stream format string below can never raise.
        def filter(self, record):
            if not hasattr(record, 'xcolor'):
                record.xcolor = ''
            return True

    logger = multiprocessing.get_logger()
    logger.setLevel(logging.DEBUG)

    if settings['log_file_dir'] and settings['log_file_level']:
        log_name = 'log_%s_%s.txt' % (multiprocessing.current_process().name,
                                      time.strftime('%Y-%m-%d_%H-%M-%S'))
        file_handler = logging.FileHandler(
            os.path.join(settings['log_file_dir'], log_name),
            mode='w', encoding='utf8')
        file_handler.setLevel(settings['log_file_level'])
        file_handler.setFormatter(logging.Formatter(
            '[%(asctime)s] %(levelname)-8s %(message)s'))
        logger.addHandler(file_handler)

    if settings['log_stderr_level']:
        stream_handler = logging.StreamHandler(stream=sys.stderr)
        stream_handler.setLevel(settings['log_stderr_level'])
        stream_handler.addFilter(ColorFilter())
        stream_handler.setFormatter(logging.Formatter(
            '[%(levelname)s/%(processName)s] %(xcolor)s%(message)s' +
            Colors.NORMAL))
        logger.addHandler(stream_handler)

    return logger
示例8: invoke_cmd_worker
def invoke_cmd_worker(item):
    """
    Launch a worker subprocess for one gzipped data file.

    `item` is a (plugin_dir, plugin, filepath, events_limit) tuple. The
    file is gunzipped and piped into a python2.7 re-invocation of this
    module. Returns the worker's stdout, or None if spawning failed
    (best-effort: the error is printed, not raised).
    """
    try:
        logger = multiprocessing.get_logger()
        pid = multiprocessing.current_process().pid
        plugin_dir, plugin, filepath, events_limit = item
        worker_fpath = os.path.abspath(__file__)
        cmd = 'gzip -d -c %s | python2.7 %s %s %s %s' % (
            filepath, worker_fpath, plugin_dir, plugin, events_limit
        )
        logger.info(
            '%d: Starting job: %s', pid, cmd
        )
        # Forward our sys.path so the re-invoked module can import the
        # same plugin machinery.
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, shell=True,
            env=env
        )
        output = process.communicate()[0]
        return output
    except Exception:
        # Fixed: traceback.print_exc() takes an optional `limit` argument,
        # not the exception object; passing `e` raised a secondary
        # TypeError instead of printing the traceback.
        traceback.print_exc()
示例9: wrapped_function
def wrapped_function(*args, **kwargs):
    """
    Run the enclosing `func` in a child process and return its result.

    Spawns a multiprocessing.Process around `subprocess_func` with the
    closure-supplied limits (mem_in_mb, cpu_time_in_s, wall_time_in_s,
    num_processes) and receives the return value over a Pipe. Returns
    None when the child exits without sending anything.
    """
    global return_value
    logger = multiprocessing.get_logger()
    # create a pipe to retrieve the return value
    parent_conn, child_conn = multiprocessing.Pipe()
    # create and start the process
    subproc = multiprocessing.Process(target=subprocess_func, name=" multiproc function call", args=(func,
                                      child_conn,
                                      mem_in_mb,
                                      cpu_time_in_s,
                                      wall_time_in_s,
                                      num_processes) + args,
                                      kwargs=kwargs)
    logger.debug("Your function is called now.")
    return_value = None
    # start the process
    subproc.start()
    # Close the parent's copy of the child end so recv() below raises
    # EOFError (instead of blocking forever) once the child exits
    # without sending a value.
    child_conn.close()
    try:
        # read the return value
        return_value = parent_conn.recv()
    except EOFError:  # Don't see that in the unit tests :(
        logger.debug("Your function call closed the pipe prematurely -> None will be returned")
        return_value = None
    except:
        raise
    finally:
        # don't leave zombies behind
        subproc.join()
    return (return_value);
示例10: run
def run(plugin_name, start_date, end_date, plugin_dir,
        data_dir='/mnt/disk1/alohalytics/by_date',
        results_dir='./stats',
        events_limit=0):
    """
    Pyaloha stats processing pipeline:
    0. Load worker, aggregator, processor classes from a specified plugin (script)
    1. Run workers (data preprocessors) on alohalytics files within specified range
    2. Accumulate [and postprocess] worker results with an aggregator instance
    3. Run stats processor and print results to stdout
    """
    aggregator = aggregate_raw_data(
        data_dir, results_dir, plugin_dir, plugin_name,
        start_date, end_date, events_limit
    )

    plugin_module = load_plugin(plugin_name, plugin_dir=plugin_dir)
    stats = plugin_module.StatsProcessor(aggregator)

    log = multiprocessing.get_logger()
    log.info('Stats: processing')
    stats.process_stats()
    log.info('Stats: outputting')
    stats.print_stats()
    log.info('Stats: done')
示例11: run_periodic_tasks
def run_periodic_tasks(self):
    """Run all due periodic tasks and log each applied task at debug level."""
    log = get_logger()
    for task, task_id in default_periodic_status_backend.run_periodic_tasks():
        log.debug(
            "PeriodicWorkController: Periodic task %s applied (%s)" % (
                task.name, task_id))
示例12: __init__
def __init__(self, host, pipe, port=80, channels=None):
    """
    Create a new client.

    host     : host to connect
    pipe     : pipe of paths
    port     : port to connect (default 80)
    channels : map of file descriptors shared with asyncore/asynchat

    Connects immediately and issues the first request via send_request().
    """
    asynchat.async_chat.__init__(self, map=channels)
    self._log = multiprocessing.get_logger()
    self._host = host
    self._pipe = pipe
    self._port = port
    # Per-response parsing state, filled in as data arrives:
    self._time = 0            # timing accumulator
    self._htime = 0           # header timing accumulator
    self._path = ""           # path currently being requested
    self._header = ""         # raw response header text
    self._body = ""           # response body accumulated so far
    self._data = ""           # unparsed incoming data buffer
    self._protocol = ""       # protocol token from the status line
    self._status = -1         # response status; -1 = none received yet
    self._status_msg = ""     # status message from the status line
    self._close = False       # whether the connection should be closed
    self._chunked = True      # initial transfer-encoding flag; presumably
                              # updated once response headers are parsed —
                              # TODO confirm in the response handler
    self.set_terminator(HTTPAsyncClient.TERMINATOR)
    # Open the TCP connection and kick off the first request right away.
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    self.connect((self._host, self._port))
    self._log.debug(self.logmsg("HTTPAsyncClient connected to %s:%d",
                                self._host, self._port))
    self.send_request()
示例13: smac_classpath
def smac_classpath():
    """
    Small function gathering all information to build the java class path.
    :returns: string representing the Java classpath for SMAC
    """
    import multiprocessing
    from pkg_resources import resource_filename

    logger = multiprocessing.get_logger()
    smac_folder = resource_filename("pysmac", 'smac/%s' % pysmac.remote_smac.SMAC_VERSION)

    # Every jar in lib/, as absolute paths, followed by conf/ and patches/.
    lib_dir = os.path.join(smac_folder, "lib")
    entries = [os.path.abspath(os.path.join(lib_dir, name))
               for name in os.listdir(lib_dir) if name.endswith(".jar")]
    entries.append(os.path.abspath(os.path.join(smac_folder, "conf")))
    entries.append(os.path.abspath(os.path.join(smac_folder, "patches")))

    # For Windows compability
    classpath = os.pathsep.join(entries)
    logger.debug("SMAC classpath: %s", classpath)
    return classpath
示例14: error
def error(msg, *args):
    """Shortcut to multiprocessing's logger"""
    ############################################################# DEBUG
    import sys
    sys.stdout.flush()
    ############################################################# DEBUG
    logger = mp.get_logger()
    return logger.error(msg, *args)
示例15: test_log
def test_log():
    """Smoke-test multiprocessing logging: mirror records to stderr at INFO."""
    multiprocessing.log_to_stderr()
    log = multiprocessing.get_logger()
    log.setLevel(logging.INFO)
    start = time.time()
    print(time.time() - start)
    log.info("done")