This article collects typical usage examples of the Python method _emerge.SpawnProcess.SpawnProcess.wait. If you are wondering how to call SpawnProcess.wait, how it is typically used, or what real-world examples look like, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, _emerge.SpawnProcess.SpawnProcess.
The following shows 3 code examples of the SpawnProcess.wait method, sorted by popularity by default.
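Before the examples, here is a minimal sketch of the basic start()/wait() pattern that all three examples build on. It is illustrative only and assumes a running Portage event loop; the echo command and the empty env are placeholders, and fd_pipes is omitted on the assumption that it falls back to the parent's standard streams.

# A minimal sketch, not taken from the Portage sources.
proc = SpawnProcess(
    args=[BASH_BINARY, "-c", "echo hello"],  # placeholder command
    env={},
    scheduler=global_event_loop())
proc.start()
# wait() blocks until the child has exited and returns its exit status,
# which the examples below compare against os.EX_OK.
assert proc.wait() == os.EX_OK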
Example 1: testLazyImportPortageBaseline
# Required import: from _emerge.SpawnProcess import SpawnProcess [as alias]
# Or: from _emerge.SpawnProcess.SpawnProcess import wait [as alias]
def testLazyImportPortageBaseline(self):
    """
    Check what modules are imported by a baseline module import.
    """
    env = os.environ.copy()
    pythonpath = env.get('PYTHONPATH')
    if pythonpath is not None and not pythonpath.strip():
        pythonpath = None
    if pythonpath is None:
        pythonpath = ''
    else:
        pythonpath = ':' + pythonpath
    pythonpath = PORTAGE_PYM_PATH + pythonpath
    env['PYTHONPATH'] = pythonpath

    # If python is patched to insert the path of the
    # currently installed portage module into sys.path,
    # then the above PYTHONPATH override doesn't help.
    env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

    scheduler = PollScheduler().sched_iface
    master_fd, slave_fd = os.pipe()
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb')
    producer = SpawnProcess(
        args=self._baseline_import_cmd,
        env=env, fd_pipes={1: slave_fd},
        scheduler=scheduler)
    producer.start()
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=scheduler)
    consumer.start()
    consumer.wait()
    self.assertEqual(producer.wait(), os.EX_OK)
    self.assertEqual(consumer.wait(), os.EX_OK)

    output = consumer.getvalue().decode('ascii', 'replace').split()

    unexpected_modules = " ".join(sorted(x for x in output
        if self._module_re.match(x) is not None and
        x not in self._baseline_imports))

    self.assertEqual("", unexpected_modules)
Example 2: testLogfile
# Required import: from _emerge.SpawnProcess import SpawnProcess [as alias]
# Or: from _emerge.SpawnProcess.SpawnProcess import wait [as alias]
def testLogfile(self):
    logfile = None
    try:
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        null_fd = os.open('/dev/null', os.O_RDWR)
        test_string = 2 * "blah blah blah\n"
        proc = SpawnProcess(
            args=[BASH_BINARY, "-c",
                "echo -n '%s'" % test_string],
            env={},
            fd_pipes={
                0: portage._get_stdin().fileno(),
                1: null_fd,
                2: null_fd
            },
            scheduler=global_event_loop(),
            logfile=logfile)

        proc.start()
        os.close(null_fd)
        self.assertEqual(proc.wait(), os.EX_OK)

        f = io.open(_unicode_encode(logfile,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='strict')

        log_content = f.read()
        f.close()

        # When logging passes through a pty, this comparison will fail
        # unless the oflag terminal attributes have the termios.OPOST
        # bit disabled. Otherwise, transformations such as \n -> \r\n
        # may occur.
        self.assertEqual(test_string, log_content)

    finally:
        if logfile:
            try:
                os.unlink(logfile)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
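A note on Example 2: the child's stdout and stderr (fds 1 and 2) are both pointed at /dev/null, so the assertion only passes because the logfile argument captures the child's output into the temporary file; the comment about termios.OPOST matters when that logging path goes through a pty, where \n could otherwise be rewritten as \r\n.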
Example 3: _LockProcess
# Required import: from _emerge.SpawnProcess import SpawnProcess [as alias]
# Or: from _emerge.SpawnProcess.SpawnProcess import wait [as alias]
class _LockProcess(AbstractPollTask):
    """
    This uses the portage.locks module to acquire a lock asynchronously,
    using a subprocess. After the lock is acquired, the process
    writes to a pipe in order to notify a poll loop running in the main
    process. The unlock() method notifies the subprocess to release the
    lock and exit.
    """

    __slots__ = ('path',) + \
        ('_acquired', '_kill_test', '_proc', '_files', '_reg_id', '_unlocked')
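    # Intended usage, sketched from the docstring above (not part of the
    # original class; the lock path and scheduler names are illustrative):
    #
    #     lock = _LockProcess(path='/tmp/example.lock', scheduler=scheduler)
    #     lock.start()
    #     lock.wait()      # returns os.EX_OK once the subprocess holds the lock
    #     ...              # critical section
    #     lock.unlock()    # ask the subprocess to release the lock and exit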
    def _start(self):
        in_pr, in_pw = os.pipe()
        out_pr, out_pw = os.pipe()
        self._files = {}
        self._files['pipe_in'] = in_pr
        self._files['pipe_out'] = out_pw
        fcntl.fcntl(in_pr, fcntl.F_SETFL,
            fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
        self._reg_id = self.scheduler.register(in_pr,
            self.scheduler.IO_IN, self._output_handler)
        self._registered = True
        self._proc = SpawnProcess(
            args=[portage._python_interpreter,
                os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
            env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
            fd_pipes={0: out_pr, 1: in_pw, 2: sys.stderr.fileno()},
            scheduler=self.scheduler)
        self._proc.addExitListener(self._proc_exit)
        self._proc.start()
        os.close(out_pr)
        os.close(in_pw)

    def _proc_exit(self, proc):

        if self._files is not None:
            # Close pipe_out if it's still open, since it's useless
            # after the process has exited. This helps to avoid
            # "ResourceWarning: unclosed file" since Python 3.2.
            try:
                pipe_out = self._files.pop('pipe_out')
            except KeyError:
                pass
            else:
                os.close(pipe_out)

        if proc.returncode != os.EX_OK:
            # Typically, this will happen due to the
            # process being killed by a signal.

            if not self._acquired:
                # If the lock hasn't been acquired yet, the
                # caller can check the returncode and handle
                # this failure appropriately.
                if not (self.cancelled or self._kill_test):
                    writemsg_level("_LockProcess: %s\n" % \
                        _("failed to acquire lock on '%s'") % (self.path,),
                        level=logging.ERROR, noiselevel=-1)
                self._unregister()
                self.returncode = proc.returncode
                self.wait()
                return

            if not self.cancelled and \
                not self._unlocked:
                # We don't want lost locks going unnoticed, so it's
                # only safe to ignore if either the cancel() or
                # unlock() methods have been previously called.
                raise AssertionError("lock process failed with returncode %s" \
                    % (proc.returncode,))

    def _cancel(self):
        if self._proc is not None:
            self._proc.cancel()

    def _poll(self):
        if self._proc is not None:
            self._proc.poll()
        return self.returncode

    def _output_handler(self, f, event):
        buf = None
        if event & self.scheduler.IO_IN:
            try:
                buf = os.read(self._files['pipe_in'], self._bufsize)
            except OSError as e:
                if e.errno not in (errno.EAGAIN,):
                    raise
        if buf:
            self._acquired = True
            self._unregister()
            self.returncode = os.EX_OK
            self.wait()

        return True

    def _unregister(self):
        self._registered = False
    #......... the rest of the code is omitted here .........