This article collects typical usage examples of the Python method multiprocessing.managers.SyncManager.shutdown. If you have been wondering what SyncManager.shutdown does, how to call it, or what real uses of it look like, the curated examples below should help. You may also want to look further into the containing class, multiprocessing.managers.SyncManager.
The following presents 8 code examples of SyncManager.shutdown, sorted by popularity by default.
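Before the examples, here is a minimal sketch of the lifecycle they all demonstrate (illustrative only, not taken from any example below): a SyncManager is started, hands out proxy objects, and is eventually stopped with shutdown().

from multiprocessing.managers import SyncManager

manager = SyncManager()
manager.start()              # spawns the manager's server process
try:
    shared = manager.dict()  # proxy to a dict living in the manager process
    shared['answer'] = 42
finally:
    manager.shutdown()       # terminates the server process and frees its resources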
Example 1: main_proc
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
import os
from multiprocessing import Process
from multiprocessing.managers import SyncManager

def main_proc():
    pid = os.getpid()
    # Initialize the manager; mgr_init runs inside the manager's server
    # process (see the sketch below for a typical implementation).
    mgr = SyncManager()
    mgr.start(mgr_init)
    try:
        # Create a shared object usable across processes
        shared_queue = mgr.Queue()
        # Create subprocesses
        put_proc = Process(target=put_data_proc, args=(shared_queue,))
        put_proc_1 = Process(target=put_data_proc_1, args=(shared_queue,))
        get_proc = Process(target=get_data_proc, args=(shared_queue,))
        # Start the processes
        put_proc.start()
        put_proc_1.start()
        get_proc.start()
        # Join the processes until they finish
        put_proc.join()
        put_proc_1.join()
        get_proc.join()
    except KeyboardInterrupt:
        print("Main process (pid=%s) was interrupted" % pid)
    finally:
        mgr.shutdown()
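mgr_init itself is not part of this excerpt. A common implementation (and the pattern Example 4 below uses for the same purpose) makes the manager's server process ignore interrupt signals, so a Ctrl-C reaches only the main process, which then calls shutdown() from its finally block. A minimal sketch, assuming that intent:

import signal

def mgr_init():
    # Runs once inside the freshly started manager server process;
    # ignore SIGINT so only the main process handles Ctrl-C.
    signal.signal(signal.SIGINT, signal.SIG_IGN)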
Example 2: Queue
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
from multiprocessing import Queue as ProcessQueue
from multiprocessing.managers import SyncManager
# `queue` below is the project's own module providing the QUEUE_TYPE_* constants.

class Queue():
    """Class Queue
    """

    __type = None
    __manager = None
    __address = None
    __authkey = None

    def __init__(self, qtype, address, authkey=''):
        """Class constructor

        Called when object is initialized

        Args:
            qtype (int): queue type, server|client
            address (str): queue address
            authkey (str): authentication key

        Raises:
            error: ValueError
        """
        if qtype in (queue.QUEUE_TYPE_SERVER, queue.QUEUE_TYPE_CLIENT):
            self.__type = qtype
        else:
            raise ValueError('Invalid Queue type')

        # Check for AF_INET address format (host:port); anything else is
        # treated as an AF_UNIX socket path.
        if address.find(':') > 0:
            host, port = address.split(':')
            address = (host, int(port))  # SyncManager expects a (host, port) tuple
        self.__address = address
        self.__authkey = authkey

    def create(self):
        """Method creates queue server

        Args:
            none

        Returns:
            void

        Raises:
            error: ValueError
        """
        if self.__type != queue.QUEUE_TYPE_SERVER:
            raise ValueError(
                'This operation cannot be done on this queue type')

        # multiprocessing.Queue is imported under an alias: a bare Queue()
        # here would recurse into this class instead.
        q = ProcessQueue()
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.start()

    def destroy(self):
        """Method destroys queue

        Args:
            none

        Returns:
            void
        """
        self.__manager.shutdown()

    def connect(self):
        """Method connects to queue

        Args:
            none

        Returns:
            void

        Raises:
            error: ValueError
        """
        if self.__type != queue.QUEUE_TYPE_CLIENT:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = ProcessQueue()
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.connect()
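For orientation, a hedged sketch of driving this class end to end. The QUEUE_TYPE_* constants come from the snippet's own queue module, the address is made up, and the client half mirrors what connect() sets up internally via SyncManager.register/connect (on Python 3 the empty authkey would need to be bytes):

# Server process: hosts the queue until destroy() is called.
server = Queue(queue.QUEUE_TYPE_SERVER, '127.0.0.1:50000')
server.create()

# Client process: attach a bare SyncManager to the same address and
# fetch the registered proxy.
SyncManager.register('get_queue')
client_mgr = SyncManager(('127.0.0.1', 50000), '')
client_mgr.connect()
q = client_mgr.get_queue()   # proxy to the queue living in the server
q.put('hello')

server.destroy()             # SyncManager.shutdown() under the hood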
Example 3: perform_analysis
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
#......... (part of the code omitted) .........
    if 'stats_enabled' in args:
        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            SpecialReturnValueCollector.checker_analyze)
        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            ReturnValueCollector.checker_analyze)

    # Save some metadata information.
    versions = __get_analyzer_version(context, config_map)
    metadata['versions'].update(versions)

    metadata['checkers'] = {}
    for analyzer in analyzers:
        metadata['checkers'][analyzer] = {}
        for check, data in config_map[analyzer].checks().items():
            enabled, _ = data
            metadata['checkers'][analyzer].update({check: enabled})

    if ctu_collect:
        shutil.rmtree(ctu_dir, ignore_errors=True)
    elif ctu_analyze and not os.path.exists(ctu_dir):
        LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
        return

    start_time = time.time()

    # Use Manager to create data objects which can be
    # safely shared between processes.
    manager = SyncManager()
    manager.start(__mgr_init)

    config_map = manager.dict(config_map)
    actions_map = create_actions_map(actions, manager)

    # Setting to not None value will enable statistical analysis features.
    statistics_data = __get_statistics_data(args, manager)

    if ctu_collect or statistics_data:
        ctu_data = None
        if ctu_collect or ctu_analyze:
            ctu_data = manager.dict({'ctu_dir': ctu_dir,
                                     'ctu_func_map_file': 'externalFnMap.txt',
                                     'ctu_temp_fnmap_folder': 'tmpExternalFnMaps'})

        pre_analyze = [a for a in actions
                       if a.analyzer_type == ClangSA.ANALYZER_NAME]
        pre_analysis_manager.run_pre_analysis(pre_analyze,
                                              context,
                                              config_map,
                                              args.jobs,
                                              skip_handler,
                                              ctu_data,
                                              statistics_data,
                                              manager)

    if 'stats_output' in args and args.stats_output:
        return

    if 'stats_dir' in args and args.stats_dir:
        statistics_data = manager.dict({'stats_out_dir': args.stats_dir})

    ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
        args.ctu_reanalyze_on_failure

    if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
        LOG.info("Starting static analysis ...")
        analysis_manager.start_workers(actions_map, actions, context,
                                       config_map, args.jobs,
                                       args.output_path,
                                       skip_handler,
                                       metadata,
                                       'quiet' in args,
                                       'capture_analysis_output' in args,
                                       args.timeout if 'timeout' in args
                                       else None,
                                       ctu_reanalyze_on_failure,
                                       statistics_data,
                                       manager)
        LOG.info("Analysis finished.")
        LOG.info("To view results in the terminal use the "
                 "\"CodeChecker parse\" command.")
        LOG.info("To store results use the \"CodeChecker store\" command.")
        LOG.info("See --help and the user guide for further options about"
                 " parsing and storing the reports.")
        LOG.info("----=================----")

    end_time = time.time()
    LOG.info("Analysis length: %s sec.", end_time - start_time)

    metadata['timestamps'] = {'begin': start_time,
                              'end': end_time}

    if ctu_collect and ctu_analyze:
        shutil.rmtree(ctu_dir, ignore_errors=True)

    manager.shutdown()
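The core pattern in Example 3 — wrapping plain dicts in manager.dict() so every worker process reads and writes one authoritative copy — can be isolated into a few lines. A minimal standalone sketch (the names are illustrative, not CodeChecker's):

from multiprocessing import Process
from multiprocessing.managers import SyncManager

def worker(shared_cfg):
    # Mutations travel through the proxy back to the manager process.
    shared_cfg['seen_by_worker'] = True

if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    shared_cfg = manager.dict({'jobs': 4})
    p = Process(target=worker, args=(shared_cfg,))
    p.start()
    p.join()
    print(dict(shared_cfg))  # {'jobs': 4, 'seen_by_worker': True}
    manager.shutdown()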
Example 4: StorageManager
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
class StorageManager(object):
    '''
    Manager for per-sensor persistent storage.

    Uses `multiprocessing.managers.SyncManager` to give sensors access
    to a dict-like structure, which automagically synchronizes with
    the main process.

    Values are stored in sqlite as stringified JSON documents.
    '''

    # TODO #1505: check if storage manager is picklable
    def __init__(self, sqlite_factory):
        '''
        Initializes sync manager and logger.
        '''
        self.log = logger(self.__class__.__name__)
        self.storages = {}
        self.manager = SyncManager()

        def ignore_signals():
            '''
            Ignores SIGINT and SIGTERM.

            We don't want them propagated to SyncManager, because
            we want to store its state to disk on Agent shutdown.
            '''
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

        self.manager.start(ignore_signals)
        self.ppid = None
        self.sqliteconn = sqlite_factory()

    def get_storage(self, name):
        '''
        Retrieves storage for the given name. If such storage doesn't exist,
        a new one, possibly populated with data from sqlite, will be created.

        Note that name is not necessarily the sensor's name. In fact,
        most of the time it will be sensor_name + hash(sensor_config).
        This way we can differentiate storages within one sensor type.
        '''
        self.log.debug('Storage requested for `{}`'.format(name))
        if name in self.storages:
            return self.storages[name]

        cursor = self.sqliteconn.cursor()
        cursor.execute("SELECT value FROM sensorstorage WHERE key=?", (name,))
        try:
            # fetchone() returns a 1-tuple (or None if the key is absent);
            # either way, any failure here falls back to an empty dict.
            storage_data = json.loads(cursor.fetchone()[0])
        # Catching too general exception
        # pylint: disable=W0703
        except Exception:
            storage_data = {}

        # Instance of 'SyncManager' has no 'dict' member
        # pylint: disable=E1101
        self.storages[name] = self.manager.dict(storage_data)
        return self.storages[name]

    def shutdown(self):
        '''
        Flushes all remaining storages into sqlite
        and shuts down the manager.
        '''
        self.log.debug('Storage manager: Started shutdown')
        cursor = self.sqliteconn.cursor()
        for sensor, store in self.storages.items():
            cursor.execute(
                'INSERT OR REPLACE INTO sensorstorage'
                ' (key, value) VALUES (?,?)',
                (sensor, json.dumps(dict(store))),
            )
        self.sqliteconn.commit()
        self.sqliteconn.close()
        self.manager.shutdown()
        self.log.debug('Storage manager: Finished shutdown')
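A hedged usage sketch of the class above; the sensorstorage schema is inferred from its SELECT/INSERT statements, and the storage name is made up:

import sqlite3

def sqlite_factory():
    conn = sqlite3.connect('sensors.db')
    # Schema inferred from the queries in get_storage()/shutdown().
    conn.execute('CREATE TABLE IF NOT EXISTS sensorstorage '
                 '(key TEXT PRIMARY KEY, value TEXT)')
    return conn

sm = StorageManager(sqlite_factory)
store = sm.get_storage('cpu_sensor-1a2b')  # dict-like proxy
store['last_reading'] = 0.42
sm.shutdown()                              # flushes to sqlite, stops the manager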
示例5: print
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import shutdown [as 别名]
        msg_type = m.group(1)
        msg_from = m.group(2)
        if msg_type == "STOPPED":
            print("{0} message recv'd from {1}".format(msg, msg_from))
            collectors[msg_from].running = False
            if number_running(collectors) == 0:
                print("No one is running - shutting down - I wouldn't really do this for realsies")
                cont = 0
        elif msg_type is not None:
            print("Parent recv'd a {0} message from {1} - {2}".format(msg_type, msg_from, msg))
        sleep(1)
    except:
        print("Error checking pipes")

# Send a poison pill to each collector in order to signal a terminate
for each in collectors:
    collectors[each].parent_p.send(None)

# Wait for the collectors to finish - this will change to a loop for
# new collector plugins
try:
    for each in collectors:
        collectors[each].process.join()
except KeyboardInterrupt:
    print("Keyboard interrupt in main")
finally:
    # To be safe -- explicitly shut down the manager
    manager.shutdown()
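The excerpt shows only the parent's half of the poison-pill handshake. A hedged sketch of what each collector presumably runs on the other end of its pipe (handle() is a hypothetical stand-in for the real per-message work):

def collector_loop(child_p):
    # Runs in the collector process; child_p is its end of the Pipe.
    while True:
        msg = child_p.recv()
        if msg is None:   # poison pill from the parent
            break
        handle(msg)       # hypothetical per-message work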
Example 6: getImgFile
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
#......... (part of the code omitted) .........
        cfg.set('imageurl', 'bad_domain', json.JSONEncoder(ensure_ascii=False, separators=(',', ':')).encode(self.bad_domain).replace(',"', ',\n"'))
        self.use_proxy = list(set(self.use_proxy))
        self.use_proxy.sort(key=lambda x: '.'.join(reversed(x.split('.'))))
        cfg.set('imageurl', 'use_proxy', json.JSONEncoder(ensure_ascii=False, separators=(',', ':')).encode(self.use_proxy).replace(',"', ',\n"'))
        cfg.set('reply_id', 'maxid', str(self.processed_maxid))
        cfg.write(codecs.open(inifile, 'w', inifile_encoding))

    def getImage(self, id_from=None, nr_limit=None, nr_process=5):
        u'''Fetch from the database the image URLs whose actual files still need to be downloaded'''
        info, debug = self.logger.info, self.logger.debug
        docnt = 0
        if nr_process:
            queue = Queue(nr_process * 3)
            info('creating worker processes (%d)...', nr_process)
            self.loadCfg()
            self.setSocket(5, 1)
            if not self.multiprocess_manager:
                self.multiprocess_manager = SyncManager()  # SyncManager(('', 58585))
                self.multiprocess_manager.start()
            self.bad_domain = self.multiprocess_manager.list(self.bad_domain)
            self.use_proxy = self.multiprocess_manager.list(self.use_proxy)
            self.lck4baddomain = self.multiprocess_manager.Lock()
            self.lck4useproxy = self.multiprocess_manager.Lock()
            self.lck4mcnt = self.multiprocess_manager.Lock()
            # logger for multiprocess
            self.mlog = get_logger()
            mhandler = logging.StreamHandler()
            mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s %(message)s', '%H:%M:%S'))
            self.mlog.addHandler(mhandler)
            self.mlog.setLevel(logging.INFO)
            # event for subprocesses to initiate their own exit
            shutdown = self.multiprocess_manager.Event()
            # namespace for global counter, etc.
            self.nms = self.multiprocess_manager.Namespace()
            self.nms.mcnt = 0
            processes = []
            for i in range(nr_process):
                proc = Process(target=self.getImageProcess, name='worker-%d' % i,
                               args=(queue, shutdown, 'worker-%d' % i))
                processes.append(proc)
                ## proc.name = proc.name.replace('Proces', 'myWorker-%d' % i)
                proc.daemon = True
                proc.start()
        cnt, tmpminid, step = 0, 0, 500
        if id_from:
            tmpminid = id_from
            tmp = ImgUrl.objects.aggregate(maxid=Max('id'))
            maxid = tmp['maxid'] if tmp['maxid'] else 0
        else:
            ## tmp = ImgUrl.objects.aggregate(minid=Min('id'), maxid=Max('id'))
            tmp = ImgUrl.objects.filter(
                img__isnull=True).exclude(
                stat__exact=4).aggregate(minid=Min('id'), maxid=Max('id'))
            tmpminid, maxid = tmp['minid'] if tmp['minid'] else 0, tmp['maxid'] if tmp['maxid'] else 0
        info('%s\n\tget img from net, minid,maxid=%d,%d\n\n', '~~~~' * 20, tmpminid, maxid)
        itertimes = 0
        while (not nr_limit) or cnt <= nr_limit:
            info('%s%d) cnt=%d, tmpminid=%d %s', '-=' * 15, itertimes, cnt, tmpminid, '-=' * 15)
            ## il = ImgUrl.objects.filter(id__gte=tmpminid, id__lt=tmpminid + step).filter(
            ##     img__isnull=True).exclude(
            ##     stat__exact=4).order_by('id').iterator()
Example 7: SpiderEngine
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
class SpiderEngine(object):

    def __init__(self, cookie_file, url_queue_size, pg_queue_size, nr_downloadprocess, nr_parserprocess):
        super(SpiderEngine, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.multiprocess_manager = SyncManager()  # SyncManager(('', 58585))
        self.multiprocess_manager.start()
        self.lck4urlq = self.multiprocess_manager.Lock()
        self.lck4pageq = self.multiprocess_manager.Lock()
        # event for subprocesses to initiate their own exit
        self.shutdown = self.multiprocess_manager.Event()
        self.url_queue = Queue(url_queue_size)
        self.page_queue = Queue(pg_queue_size)
        self.url_hist = self.multiprocess_manager.dict()
        self.urls = UrlScheduler(self.url_queue, self.url_hist, self.lck4urlq)
        # init multiprocess log
        self.mlog = get_logger()
        mhandler = logging.StreamHandler()
        mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s() | %(message)s', '%H:%M:%S'))
        self.mlog.addHandler(mhandler)
        self.mlog.setLevel(logging.INFO)
        self.pages = PageScheduler(self.urls, self.page_queue, self.lck4pageq)
        self.downloader = PageDownloader(cookie_file, self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_downloadprocess, self.mlog)
        self.parser = PageParser(self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_parserprocess, self.mlog)

    def setSignalHandler(self):
        global EXITEVENT
        EXITEVENT = self.shutdown
        if sys.platform == 'linux2':
            # register signal handlers
            ## signal.signal(signal.SIGUSR1, signalHandler)
            signal.signal(signal.SIGTERM, signalHandler)

    def doWork(self, initurl, allowHost, blockHost, allowPattern, skipPattern, nr_limit):
        info, debug = self.logger.info, self.logger.debug
        self.parser.addHost(allowHost, blockHost)
        self.parser.addPattern(allowPattern, skipPattern)
        self.downloader.createProcesses()
        self.parser.createProcesses()
        info('wait 2 secs ...')
        time.sleep(2)
        self.setSignalHandler()
        ## debug('init url: %s', initurl)
        self.urls.addUrl('test', initurl)
        ## debug('init url add.')
        old = 0  # self.downloader.nms.mcnt
        try:
            while True:
                if self.shutdown.wait(2):
                    info('shutdown event got.')
                    break
                if self.downloader.nms.mcnt != old:
                    old = self.downloader.nms.mcnt
                    info('\n%s\n\tdownloader mcnt: %d\n%s', '-*' * 30, old, '-*' * 30)
                if old > nr_limit:
                    info('exceed nr_limit %d>%d, break', self.downloader.nms.mcnt, nr_limit)
                    break
        except KeyboardInterrupt:
            info('got KeyboardInterrupt')
        finally:
            debug('\n%s', '~' * 30)
            self.downloader.getStat()
            self.parser.getStat()
            debug('\n%s', '~' * 30)
            self.exit()
            debug('\n%s', '*~' * 30)
            ## time.sleep(1)

    def exit(self):
        info, debug = self.logger.info, self.logger.debug
        self.shutdown.set()
        try:
            while True:
                self.page_queue.get_nowait()
        except Empty:
            pass
        try:
            while True:
                self.url_queue.get_nowait()
        except Empty:
            pass
        ## info('page_queue is Empty: %s, %d', self.page_queue.empty(), self.page_queue.qsize())
        ## info('url_queue is Empty: %s, %d', self.url_queue.empty(), self.url_queue.qsize())
        self.parser.closeProcesses()
        try:
            while True:
#......... (part of the code omitted) .........
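Examples 6 and 7 both hand a manager-created Event to their worker processes for cooperative shutdown; the worker side of that contract is not shown in either excerpt. A hedged sketch of what such a worker loop typically looks like (process() is a hypothetical stand-in for the real per-task work):

from queue import Empty  # `from Queue import Empty` on Python 2

def worker_loop(task_queue, shutdown_event):
    # Cooperative shutdown: the parent sets the event instead of killing us.
    while not shutdown_event.is_set():
        try:
            task = task_queue.get(timeout=1)
        except Empty:
            continue
        process(task)  # hypothetical per-task work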
Example 8: run_guesses
# Required import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import shutdown [as alias]
import time

# `config` and `guess_thread` come from the surrounding project and are not
# shown in this excerpt.

def run_guesses(
        hash_comp, cracker, algorithm,
        prefix="", postfix="",
        progress=False, debug=False
):
    iter_stopped = False
    threads_running = []
    total_guessed = 0
    total_started = 0

    proc_manager = SyncManager()
    proc_manager.start()
    proc_returns = proc_manager.Queue(config.MAX_THREADS)

    cracker.initialize()
    while True:
        # Check the return queue, and quit if the hash has been cracked.
        ret = None
        while not proc_returns.empty():
            thread_guessed, answer = proc_returns.get()
            total_guessed += thread_guessed
            if answer is not False:
                if debug: print("WIN:", answer)
                for thread in threads_running:
                    thread.terminate()
                ret = answer
        if ret is not None:
            proc_manager.shutdown()
            return (ret, total_guessed)

        # Check if any threads terminated.
        dead_threads = 0
        for i in range(0, len(threads_running)):
            if not threads_running[i - dead_threads].is_alive():
                threads_running.pop(i - dead_threads).terminate()
                if progress: print("GUESSED SO FAR:", total_guessed)
                dead_threads += 1

        # Spawn as many new threads as you can.
        threads_spawned = 0
        if debug: print(iter_stopped, threads_running, config.MAX_THREADS)
        while not iter_stopped and len(threads_running) < config.MAX_THREADS:
            try:
                guesses = next(cracker)
                total_started += cracker.max_per_thread()
            except StopIteration:
                iter_stopped = True
                break
            p = guess_thread(
                proc_manager, proc_returns,
                hash_comp, guesses, algorithm,
                prefix, postfix
            )
            p.start()
            threads_running.append(p)
            threads_spawned += 1
            del guesses

        if iter_stopped and len(threads_running) == 0 and proc_returns.empty():
            return (False, total_guessed)
        elif threads_spawned == 0:
            time.sleep(config.WAIT_TIME)
            continue
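guess_thread is not defined in the excerpt; given the start()/is_alive()/terminate() calls above it must behave like a multiprocessing.Process. A hedged sketch of a compatible implementation (the hashing details are placeholders, not the project's actual logic):

import hashlib
from multiprocessing import Process

class guess_thread(Process):
    def __init__(self, proc_manager, proc_returns, hash_comp,
                 guesses, algorithm, prefix, postfix):
        super(guess_thread, self).__init__()
        # proc_manager is accepted to match the call site but unused here.
        self.proc_returns = proc_returns
        self.hash_comp = hash_comp
        self.guesses = guesses
        self.algorithm = algorithm
        self.prefix, self.postfix = prefix, postfix

    def run(self):
        guessed, answer = 0, False
        for g in self.guesses:
            candidate = self.prefix + g + self.postfix
            digest = hashlib.new(self.algorithm, candidate.encode()).hexdigest()
            guessed += 1
            if digest == self.hash_comp:
                answer = candidate
                break
        # Report (count, answer-or-False) back through the managed queue.
        self.proc_returns.put((guessed, answer))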