This article collects typical usage examples of the Python method multiprocessing.current_process. If you are wondering what exactly multiprocessing.current_process does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the multiprocessing module that provides this method.
The following presents 15 code examples of multiprocessing.current_process, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
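Before the individual examples, here is a minimal self-contained sketch (not drawn from any of the projects below) of what multiprocessing.current_process returns in a parent process and in a child process:

import multiprocessing

def worker():
    # Inside a child, current_process() returns that child's Process object.
    proc = multiprocessing.current_process()
    print('child:', proc.name, proc.pid)

if __name__ == '__main__':
    p = multiprocessing.Process(target=worker, name='demo-worker')
    p.start()
    p.join()
    # In the parent, the process is always named 'MainProcess'.
    print('parent:', multiprocessing.current_process().name)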
Example 1: cleanup_fifo
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def cleanup_fifo(self, fifo):
    """ Safely cleanup a fifo that is in an unknown state
    Args:
    fifo - The path to the fifo
    """
    log.debug('{proc_id}: Cleanup of {fifo} started'
              ''.format(proc_id=multiprocessing.current_process().name,
                        fifo=fifo))
    cat_proc = subprocess.Popen('timeout 5 cat {} >/dev/null'.format(fifo),
                                shell=True)
    cat_proc.wait()
    os.remove(fifo)
    log.debug('{proc_id}: Cleanup of {fifo} complete'
              ''.format(proc_id=multiprocessing.current_process().name,
                        fifo=fifo))
Example 2: upload_pitr_data
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def upload_pitr_data(self, db, tbl, pitr_data):
    """ Upload a file of PITR data to s3 for each table
    Args:
    db - the db that was backed up.
    tbl - the table that was backed up.
    pitr_data - a dict of various data that might be helpful for running a
                PITR
    """
    zk = host_utils.MysqlZookeeper()
    replica_set = zk.get_replica_set_from_instance(self.instance)
    s3_path = PATH_PITR_DATA.format(replica_set=replica_set,
                                    date=self.datestamp,
                                    db_name=db, table=tbl)
    log.debug('{proc_id}: {db}.{tbl} Uploading pitr data to {s3_path}'
              ''.format(s3_path=s3_path,
                        proc_id=multiprocessing.current_process().name,
                        db=db, tbl=tbl))
    boto_conn = boto.connect_s3()
    bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
    key = bucket.new_key(s3_path)
    key.set_contents_from_string(json.dumps(pitr_data))
Example 3: upload_schema
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def upload_schema(self, db, table, tmp_dir_db):
    """ Upload the schema of a table to s3
    Args:
    db - the db to be backed up
    table - the table to be backed up
    tmp_dir_db - temporary storage used for all tables in the db
    """
    (schema_path, _, _) = backup.get_csv_backup_paths(
        self.instance, db, table, self.datestamp)
    create_stm = mysql_lib.show_create_table(self.instance, db, table)
    log.debug('{proc_id}: Uploading schema to {schema_path}'
              ''.format(schema_path=schema_path,
                        proc_id=multiprocessing.current_process().name))
    boto_conn = boto.connect_s3()
    bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
    key = bucket.new_key(schema_path)
    key.set_contents_from_string(create_stm)
Example 4: check_dict_of_procs
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def check_dict_of_procs(proc_dict):
    """ Check a dict of processes for exit, errors, etc.
    Args:
    A dict of processes
    Returns: True if all processes have completed with return status 0
             False if some processes are still running
             An exception is raised if any process has completed with a
             return status other than 0
    """
    success = True
    for proc in proc_dict:
        ret = proc_dict[proc].poll()
        if ret is None:
            # process has not yet terminated
            success = False
        elif ret != 0:
            raise Exception('{proc_id}: {proc} encountered an error'
                            ''.format(
                                proc_id=multiprocessing.current_process().name,
                                proc=proc))
    return success
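A hypothetical way to drive check_dict_of_procs; the dict values are assumed to be subprocess.Popen handles, since the function calls .poll() on them:

import subprocess
import time

# Hypothetical usage: launch two shell commands and poll them until both exit.
procs = {
    'sleeper': subprocess.Popen(['sleep', '2']),
    'lister': subprocess.Popen(['ls', '/tmp'], stdout=subprocess.DEVNULL),
}
while not check_dict_of_procs(procs):
    time.sleep(1)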
Example 5: get_engine
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def get_engine():
    cpid = multiprocessing.current_process().name
    ctid = threading.current_thread().name
    csid = "{}-{}".format(cpid, ctid)
    if csid not in ENGINES:
        with ENGINE_LOCK:
            # Check if the engine was created while we were
            # waiting on the lock.
            if csid in ENGINES:
                return ENGINES[csid]
            log.info("INFO: Creating engine for process! Engine name: '%s'" % csid)
            ENGINES[csid] = create_engine(SQLALCHEMY_DATABASE_URI,
                                          isolation_level="READ COMMITTED")
                                          # isolation_level="REPEATABLE READ")
    return ENGINES[csid]
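The snippet relies on module-level names (ENGINES, ENGINE_LOCK, SQLALCHEMY_DATABASE_URI, log, create_engine) that it does not show. A minimal sketch of plausible supporting definitions, with a placeholder database URI (these are assumptions, not the project's actual configuration):

import logging
import threading

from sqlalchemy import create_engine

log = logging.getLogger(__name__)
ENGINES = {}
ENGINE_LOCK = threading.Lock()
# Placeholder URI (assumption); READ COMMITTED requires a server dialect
# such as MySQL or PostgreSQL rather than SQLite.
SQLALCHEMY_DATABASE_URI = 'postgresql://user:password@localhost/appdb'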
Example 6: delete_db_session
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def delete_db_session(postfix="", flask_sess_if_possible=True):
    if flags.IS_FLASK and flask_sess_if_possible:
        # No need to do anything with flask sess
        return
    cpid = multiprocessing.current_process().name
    ctid = threading.current_thread().name
    csid = "{}-{}-{}".format(cpid, ctid, postfix)
    # print("Releasing session for thread: %s" % csid)
    # print(traceback.print_stack())
    # print("==========================")
    if csid in SESSIONS:
        with SESSION_LOCK:
            # check if the session was created while
            # we were waiting for the lock
            if csid not in SESSIONS:
                return
            SESSIONS[csid][1].close()
            del SESSIONS[csid]
            # print("Deleted session for id: ", csid)
Example 7: test_staticmethod_multiprocessing_call
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def test_staticmethod_multiprocessing_call(self):
    """Make sure not-callable isn't raised for descriptors
    astroid can't process descriptors correctly so
    pylint needs to ignore not-callable for them
    right now
    Test for https://github.com/PyCQA/pylint/issues/1699
    """
    call = astroid.extract_node(
        """
    import multiprocessing
    multiprocessing.current_process() #@
    """
    )
    with self.assertNoMessages():
        self.checker.visit_call(call)
Example 8: _get_listener
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def _get_listener():
    global _listener
    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()
    return _listener
Example 9: serve_forever
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def serve_forever(self):
    '''
    Run the server forever
    '''
    current_process()._manager_server = self
    try:
        try:
            while 1:
                try:
                    c = self.listener.accept()
                except (OSError, IOError):
                    continue
                t = threading.Thread(target=self.handle_request, args=(c,))
                t.daemon = True
                t.start()
        except (KeyboardInterrupt, SystemExit):
            pass
    finally:
        self.stop = 999
        self.listener.close()
Example 10: RebuildProxy
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    If possible the shared object is returned, or otherwise a proxy for it.
    '''
    server = getattr(current_process(), '_manager_server', None)
    if server and server.address == token.address:
        return server.id_to_obj[token.id][0]
    else:
        incref = (
            kwds.pop('incref', True) and
            not getattr(current_process(), '_inheriting', False)
        )
        return func(token, serializer, incref=incref, **kwds)

#
# Functions to create proxies and proxy types
#
Example 11: run_warp_optical_flow
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def run_warp_optical_flow(vid_item, dev_id=0):
    full_path, vid_path, vid_id = vid_item
    vid_name = vid_path.split('.')[0]
    out_full_path = osp.join(args.out_dir, vid_name)
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    current = current_process()
    dev_id = (int(current._identity[0]) - 1) % args.num_gpu
    flow_x_path = '{}/flow_x'.format(out_full_path)
    flow_y_path = '{}/flow_y'.format(out_full_path)
    cmd = osp.join(args.df_path + 'build/extract_warp_gpu') + \
        ' -f={} -x={} -y={} -b=20 -t=1 -d={} -s=1 -o={}'.format(
            quote(full_path), quote(flow_x_path), quote(flow_y_path),
            dev_id, args.out_format)
    os.system(cmd)
    print('warp on {} {} done'.format(vid_id, vid_name))
    sys.stdout.flush()
    return True
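The device assignment above leans on the undocumented _identity attribute that multiprocessing gives pool workers. A standalone sketch of the same trick, where NUM_GPU and the task function are illustrative assumptions:

from multiprocessing import Pool, current_process

NUM_GPU = 4  # assumption for illustration

def assign_device(task_id):
    # Pool workers carry _identity tuples like (1,), (2,), ..., so subtracting
    # one and taking the modulus spreads workers across GPUs round-robin.
    dev_id = (int(current_process()._identity[0]) - 1) % NUM_GPU
    return task_id, dev_id

if __name__ == '__main__':
    with Pool(4) as pool:
        print(pool.map(assign_device, range(8)))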
Example 12: square
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def square(number):
    """The function squares whatever number it is provided."""
    result = number * number
    # We can use the os module to print out the process ID that the
    # operating system assigned to this call of the function.
    proc_id = os.getpid()
    print(f"Process ID: {proc_id}")
    # We can also use the "current_process" function to get the name
    # of the Process object:
    process_name = current_process().name
    print(f"Process Name: {process_name}")
    print(f"The number {number} squares to {result}.")
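A sketch of a harness that drives square with a few worker processes (this harness is not part of the original snippet; square itself needs os and current_process imported as noted in the header comments):

from multiprocessing import Process

if __name__ == '__main__':
    processes = [Process(target=square, args=(n,)) for n in range(5)]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()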
Example 13: launch_new_instance
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def launch_new_instance(*args, **kwargs):
    """Create and run the IPython controller"""
    if sys.platform == 'win32':
        # make sure we don't get called from a multiprocessing subprocess
        # this can result in infinite Controllers being started on Windows
        # which doesn't have a proper fork, so multiprocessing is wonky
        # this only comes up when IPython has been installed using vanilla
        # setuptools, and *not* distribute.
        import multiprocessing
        p = multiprocessing.current_process()
        # the main process has name 'MainProcess'
        # subprocesses will have names like 'Process-1'
        if p.name != 'MainProcess':
            # we are a subprocess, don't start another Controller!
            return
    return IPControllerApp.launch_instance(*args, **kwargs)
Example 14: _init_child
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def _init_child(tmpdir):
    '''
    Initialize new process from multiprocessing module's Pool.
    :param tmpdir: Path of local staging area.
    '''
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # For each process, create an isolated temp folder
    proc_dir = os.path.join(tmpdir, current_process().name)
    if not os.path.isdir(proc_dir):
        os.mkdir(proc_dir)
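A sketch of wiring _init_child into a Pool; the pool size, staging directory, and which_worker task are assumptions for illustration, and _init_child's own imports of signal, os and current_process are taken as given:

import tempfile
from multiprocessing import Pool, current_process

def which_worker(_):
    # Hypothetical task: report which worker (and hence which temp folder) handled it.
    return current_process().name

if __name__ == '__main__':
    staging = tempfile.mkdtemp()
    with Pool(processes=4, initializer=_init_child, initargs=(staging,)) as pool:
        print(pool.map(which_worker, range(8)))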
Example 15: myProcess
# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import current_process [as alias]
def myProcess():
    print("{} Just performed X".format(multiprocessing.current_process().name))
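A sketch of launching it; the process name below is an arbitrary choice:

import multiprocessing

if __name__ == '__main__':
    proc = multiprocessing.Process(target=myProcess, name='worker-1')
    proc.start()
    proc.join()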