This article collects typical usage examples of the Python multiprocessing.synchronize.Lock method. If you are wondering what synchronize.Lock does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also explore further usage examples for the containing module, multiprocessing.synchronize.
The code examples below show synchronize.Lock in use, sorted by popularity in the projects they were collected from.
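
Before the individual examples, here is a minimal usage sketch (not taken from any of the projects below): `multiprocessing.Lock()` is a factory that returns a `multiprocessing.synchronize.Lock` instance, which can be passed to child processes and used as a context manager.

# Minimal sketch (not from the examples below): create a lock, share it with
# child processes, and use it as a context manager.
from multiprocessing import Lock, Process

def printer(lock, name):
    # only one process at a time may hold the lock, so output never interleaves
    with lock:
        print(f'{name} holds the lock')

if __name__ == '__main__':
    lock = Lock()  # returns a multiprocessing.synchronize.Lock instance
    workers = [Process(target=printer, args=(lock, f'worker-{i}')) for i in range(3)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()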
Example 1: Manager
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def Manager():
    '''
    Returns a manager associated with a running server process.

    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

# brython: fix me
#def Pipe(duplex=True):
#    '''
#    Returns two connection objects connected by a pipe
#    '''
#    from multiprocessing.connection import Pipe
#    return Pipe(duplex)
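
A short usage sketch for the `Manager()` helper above (the variable names are illustrative): the returned `SyncManager` hands out proxy objects, including `Lock()` proxies, that live in the manager's server process and can therefore be shared between worker processes.

# Usage sketch for the Manager() helper above; names are illustrative.
from multiprocessing import Process

def append_safely(lock, items, value):
    with lock:  # the Lock proxy supports the usual acquire/release protocol
        items.append(value)

if __name__ == '__main__':
    manager = Manager()            # the helper defined above
    shared_lock = manager.Lock()   # proxy to a Lock in the manager's server process
    shared_items = manager.list()
    procs = [Process(target=append_safely, args=(shared_lock, shared_items, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(shared_items))
    manager.shutdown()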
Example 2: __init__
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def __init__(self, maxsize=0):
    if maxsize <= 0:
        maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
    self._maxsize = maxsize
    self._reader, self._writer = Pipe(duplex=False)
    self._rlock = Lock()        # serializes reads from the pipe
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None      # the queue works without a separate write lock on Windows
    else:
        self._wlock = Lock()    # serializes writes to the pipe
    self._sem = BoundedSemaphore(maxsize)

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)
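
In the `__init__` above, each end of the queue's `Pipe` is paired with its own `Lock`: `_rlock` serializes competing readers and `_wlock` serializes competing writers (on Windows the code leaves `_wlock` as `None`). A stripped-down sketch of that pattern, with illustrative names rather than the real `Queue` internals:

# Stripped-down sketch of the lock-per-pipe-end pattern used by Queue above,
# shown single-process for brevity; the same objects can be inherited by children.
from multiprocessing import Lock, Pipe

reader, writer = Pipe(duplex=False)
rlock, wlock = Lock(), Lock()

def send_item(value):
    with wlock:            # one writer at a time
        writer.send(value)

def receive_item():
    with rlock:            # one reader at a time
        return reader.recv()

send_item('hello')
print(receive_item())      # -> hello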
Example 3: Lock
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def Lock():
    '''
    Returns a non-recursive lock object
    '''
    from multiprocessing.synchronize import Lock
    return Lock()
Example 4: _after_fork
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def _after_fork(self):
    # rebuild the thread-level state that does not survive a fork
    debug('Queue._after_fork()')
    self._notempty = threading.Condition(threading.Lock())
    self._buffer = collections.deque()
    self._thread = None
    self._jointhread = None
    self._joincancelled = False
    self._closed = False
    self._close = None
    self._send = self._writer.send
    self._recv = self._reader.recv
    self._poll = self._reader.poll
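
`_after_fork` rebuilds the thread-level state (a `threading.Condition` around a `threading.Lock`, the buffer, and the feeder-thread bookkeeping) that cannot safely be reused after a fork; the `__init__` in Example 2 registers it via `register_after_fork`. A small sketch of the same idea for an illustrative class; note that `register_after_fork` is an internal helper from `multiprocessing.util`, not public API.

# Illustrative sketch: rebuild per-process state after a fork, the same way
# Queue._after_fork() above does. register_after_fork is an internal helper.
import threading
from multiprocessing.util import register_after_fork

class Counter:
    def __init__(self):
        self._lock = threading.Lock()   # thread-level lock; not reliable across a fork
        self._value = 0
        register_after_fork(self, Counter._after_fork)

    def _after_fork(self):
        # runs in the child right after fork(): start from a fresh lock and counter
        self._lock = threading.Lock()
        self._value = 0

    def increment(self):
        with self._lock:
            self._value += 1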
Example 5: encode_worker
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def encode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('encode')
    init_logger(logger)
    logger.info('encode worker')

    stream = EncodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    # the parent holds this lock while the worker starts up;
    # releasing it signals that initialization is finished
    acquired_lock.release()

    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()

        wave: numpy.ndarray = item.item
        stream.add(start_time=start_time, data=wave)
        start_time += time_length

        feature_wrapper: AcousticFeatureWrapper = stream_wrapper.process_next(time_length=time_length)
        item.item = feature_wrapper
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')
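
The `acquired_lock` argument implements a readiness handshake: the parent is expected to create the `Lock`, acquire it, and start the worker, so the `release()` above tells the parent that the slow vocoder setup has finished. A sketch of the parent side, assuming the project wires it up roughly like this (the helper name and the numeric values are illustrative, not part of the example above):

# Assumed parent-side wiring for encode_worker above; names and values are illustrative.
from multiprocessing import Lock, Process, Queue

def start_encode_worker(realtime_vocoder):
    lock = Lock()
    lock.acquire()                 # hold the lock until the worker is ready
    queue_in, queue_out = Queue(), Queue()
    process = Process(
        target=encode_worker,      # the worker defined above
        kwargs=dict(
            realtime_vocoder=realtime_vocoder,
            time_length=1.0,       # illustrative values
            extra_time=0.5,
            queue_input=queue_in,
            queue_output=queue_out,
            acquired_lock=lock,
        ),
    )
    process.start()
    lock.acquire()                 # blocks until encode_worker calls acquired_lock.release()
    return process, queue_in, queue_out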
Example 6: create
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def create(
    cls, input_queue: IterableQueue, output_queues: OutputQueues, total_workers: int
) -> "StageParams":
    return cls(
        lock=multiprocessing.Lock(),
        namespace=utils.Namespace(active_workers=total_workers),
        input_queue=input_queue,
        output_queues=output_queues,
    )
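
Here the `Lock` guards the shared `active_workers` counter kept in the namespace: each worker that finishes must decrement it atomically so the stage can tell when the last worker is done. A minimal sketch of that bookkeeping using standard `multiprocessing` objects (the helper functions are hypothetical, not part of the library the example comes from):

# Illustrative sketch of what the lock protects: a shared active-worker counter.
# make_stage_state and worker_done are hypothetical helpers, not library API.
import multiprocessing

def make_stage_state(total_workers):
    manager = multiprocessing.Manager()
    lock = multiprocessing.Lock()
    namespace = manager.Namespace()
    namespace.active_workers = total_workers
    return manager, lock, namespace   # keep the manager alive alongside its proxy

def worker_done(lock, namespace):
    with lock:  # make the read-modify-write on the counter atomic
        namespace.active_workers -= 1
        return namespace.active_workers == 0   # True only for the last worker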
Example 7: Lock
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def Lock(self):
    """Returns a lock object"""
    from .synchronize import Lock
    return Lock()
Example 8: convert_worker
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def convert_worker(
        acoustic_converter: AcousticConverter,
        super_resolution: SuperResolution,
        time_length: float,
        extra_time: float,
        input_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('convert')
    init_logger(logger)
    logger.info('convert worker')

    chainer.global_config.enable_backprop = False
    chainer.global_config.train = False

    stream = ConvertStream(
        voice_changer=VoiceChanger(
            super_resolution=super_resolution,
            acoustic_converter=acoustic_converter,
            threshold=input_silent_threshold,
        )
    )
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()  # signal the parent that setup is complete

    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()

        in_feature: AcousticFeatureWrapper = item.item
        stream.add(
            start_time=start_time,
            data=in_feature,
        )
        start_time += time_length

        out_feature = stream_wrapper.process_next(time_length=time_length)
        item.item = out_feature
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')
Example 9: decode_worker
# Required import: from multiprocessing import synchronize [as alias]
# Or: from multiprocessing.synchronize import Lock [as alias]
def decode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        vocoder_buffer_size: int,
        out_audio_chunk: int,
        output_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('decode')
    init_logger(logger)
    logger.info('decode worker')

    realtime_vocoder.create_synthesizer(
        buffer_size=vocoder_buffer_size,
        number_of_pointers=16,
    )
    stream = DecodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()  # signal the parent that setup is complete

    start_time = extra_time
    wave_fragment = numpy.empty(0)
    while True:
        item: Item = queue_input.get()
        start = time.time()

        feature: AcousticFeature = item.item
        stream.add(
            start_time=start_time,
            data=feature,
        )
        start_time += time_length

        wave = stream_wrapper.process_next(time_length=time_length)
        wave_fragment = numpy.concatenate([wave_fragment, wave])
        if len(wave_fragment) >= out_audio_chunk:
            wave, wave_fragment = wave_fragment[:out_audio_chunk], wave_fragment[out_audio_chunk:]

            power = librosa.core.power_to_db(numpy.abs(librosa.stft(wave)) ** 2).mean()
            if power < -output_silent_threshold:
                wave = None  # treat the chunk as silence
        else:
            wave = None  # not enough samples for a full chunk yet

        item.item = wave
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')