This article collects typical usage examples of the Python function portage.os.pipe. If you have been wondering how the pipe function works, how to call it, or what real-world uses of it look like, the curated code examples below should help.
The following presents 15 code examples of the pipe function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
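Before the examples, here is a minimal standard-library sketch (independent of portage, whose os module wraps the standard one) of what os.pipe() itself provides: a pair of file descriptors where bytes written to the write end come back out of the read end.

import os

# os.pipe() returns (read_fd, write_fd) as raw file descriptors.
read_fd, write_fd = os.pipe()

# Write raw bytes into the write end...
os.write(write_fd, b"hello through a pipe\n")
os.close(write_fd)  # close so the reader will see EOF

# ...and read them back from the read end.
data = os.read(read_fd, 4096)
os.close(read_fd)
print(data)  # b'hello through a pipe\n'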
Example 1: _start
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files['pipe_in'] = in_pr
    self._files['pipe_out'] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    # FD_CLOEXEC is enabled by default in Python >=3.4.
    if sys.hexversion < 0x3040000:
        try:
            fcntl.FD_CLOEXEC
        except AttributeError:
            pass
        else:
            fcntl.fcntl(in_pr, fcntl.F_SETFD,
                fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

    self._reg_id = self.scheduler.io_add_watch(in_pr,
        self.scheduler.IO_IN, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[portage._python_interpreter,
            os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno()},
        scheduler=self.scheduler)
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
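A recurring pattern in these examples is the fcntl call that puts the read end into non-blocking mode. As a standalone sketch (POSIX, Python 3 standard library only): once O_NONBLOCK is set, reading from an empty pipe raises BlockingIOError instead of hanging.

import fcntl
import os

pr, pw = os.pipe()

# Add O_NONBLOCK to the read end's existing status flags,
# preserving whatever flags are already set.
flags = fcntl.fcntl(pr, fcntl.F_GETFL)
fcntl.fcntl(pr, fcntl.F_SETFL, flags | os.O_NONBLOCK)

try:
    os.read(pr, 1024)  # nothing has been written yet
except BlockingIOError:
    print("pipe is empty; a blocking read would have hung here")

os.close(pr)
os.close(pw)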
Example 2: _create_pty_or_pipe
def _create_pty_or_pipe(copy_term_size=None):
    """
    Try to create a pty and if that fails then create a normal
    pipe instead.

    @param copy_term_size: If a tty file descriptor is given
        then the term size will be copied to the pty.
    @type copy_term_size: int
    @rtype: tuple
    @returns: A tuple of (is_pty, master_fd, slave_fd) where
        is_pty is True if a pty was successfully allocated, and
        False if a normal pipe was allocated.
    """
    got_pty = False

    global _disable_openpty, _fbsd_test_pty

    if _fbsd_test_pty and not _disable_openpty:
        # Test for python openpty breakage after freebsd7 to freebsd8
        # upgrade, which results in a 'Function not implemented' error
        # and the process being killed.
        pid = os.fork()
        if pid == 0:
            pty.openpty()
            os._exit(os.EX_OK)
        pid, status = os.waitpid(pid, 0)
        if (status & 0xff) == 140:
            _disable_openpty = True
        _fbsd_test_pty = False

    if _disable_openpty:
        master_fd, slave_fd = os.pipe()
    else:
        try:
            master_fd, slave_fd = pty.openpty()
            got_pty = True
        except EnvironmentError as e:
            _disable_openpty = True
            writemsg("openpty failed: '%s'\n" % str(e),
                noiselevel=-1)
            del e
            master_fd, slave_fd = os.pipe()

    if got_pty:
        # Disable post-processing of output since otherwise weird
        # things like \n -> \r\n transformations may occur.
        mode = termios.tcgetattr(slave_fd)
        mode[1] &= ~termios.OPOST
        termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

    if got_pty and \
            copy_term_size is not None and \
            os.isatty(copy_term_size):
        rows, columns = get_term_size()
        set_term_size(rows, columns, slave_fd)

    return (got_pty, master_fd, slave_fd)
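Stripped of the FreeBSD workaround and the terminal-size handling, the core fallback idea of _create_pty_or_pipe() can be sketched with just the standard library (POSIX only; a simplified illustration, not portage's actual implementation):

import os
import pty

def create_pty_or_pipe():
    """Return (is_pty, master_fd, slave_fd), preferring a pty."""
    try:
        master_fd, slave_fd = pty.openpty()
        return True, master_fd, slave_fd
    except OSError:
        # No pty available (e.g. a restricted environment); a plain
        # pipe still lets a child write and the parent read.
        master_fd, slave_fd = os.pipe()
        return False, master_fd, slave_fd

is_pty, master_fd, slave_fd = create_pty_or_pipe()
os.write(slave_fd, b"hi\n")
print(is_pty, os.read(master_fd, 16))  # a pty may turn \n into \r\n
os.close(master_fd)
os.close(slave_fd)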
Example 3: _pipe
def _pipe(self, fd_pipes):
    """When appropriate, use a pty so that fetcher progress bars,
    like wget has, will work properly."""
    if self.background or not sys.stdout.isatty():
        # When the output only goes to a log file,
        # there's no point in creating a pty.
        return os.pipe()
    stdout_pipe = fd_pipes.get(1)
    got_pty, master_fd, slave_fd = \
        _create_pty_or_pipe(copy_term_size=stdout_pipe)
    return (master_fd, slave_fd)
Example 4: _start
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files['pipe_in'] = in_pr
    self._files['pipe_out'] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    self._reg_id = self.scheduler.register(in_pr,
        self.scheduler.IO_IN, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[portage._python_interpreter,
            os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.stderr.fileno()},
        scheduler=self.scheduler)
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
Example 5: _start
def _start(self):
    pr, pw = os.pipe()
    self.fd_pipes = {}
    self.fd_pipes[pw] = pw
    self._digest_pw = pw
    self._digest_pipe_reader = PipeReader(
        input_files={"input": pr},
        scheduler=self.scheduler)
    self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
    self._digest_pipe_reader.start()
    ForkProcess._start(self)
    os.close(pw)
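Mapping pw to itself in fd_pipes is what lets the write end survive into the forked child, while the parent-side PipeReader drains the read end. The same parent/child pipe pattern, reduced to the bare standard library (illustrative only, no portage classes):

import os

pr, pw = os.pipe()
pid = os.fork()
if pid == 0:
    # Child: keep only the write end and report a result.
    os.close(pr)
    os.write(pw, b"digest: deadbeef\n")
    os.close(pw)
    os._exit(0)

# Parent: close the write end, or the read loop would never see EOF.
os.close(pw)
chunks = []
while True:
    chunk = os.read(pr, 4096)
    if not chunk:
        break
    chunks.append(chunk)
os.close(pr)
os.waitpid(pid, 0)
print(b"".join(chunks))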
Example 6: testLazyImportPortageBaseline
def testLazyImportPortageBaseline(self):
    """
    Check what modules are imported by a baseline module import.
    """
    env = os.environ.copy()
    pythonpath = env.get('PYTHONPATH')
    if pythonpath is not None and not pythonpath.strip():
        pythonpath = None
    if pythonpath is None:
        pythonpath = ''
    else:
        pythonpath = ':' + pythonpath
    pythonpath = PORTAGE_PYM_PATH + pythonpath
    env['PYTHONPATH'] = pythonpath

    # If python is patched to insert the path of the
    # currently installed portage module into sys.path,
    # then the above PYTHONPATH override doesn't help.
    env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH

    scheduler = PollScheduler().sched_iface
    master_fd, slave_fd = os.pipe()
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb')
    producer = SpawnProcess(
        args=self._baseline_import_cmd,
        env=env, fd_pipes={1: slave_fd},
        scheduler=scheduler)
    producer.start()
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=scheduler)
    consumer.start()
    consumer.wait()
    self.assertEqual(producer.wait(), os.EX_OK)
    self.assertEqual(consumer.wait(), os.EX_OK)

    output = consumer.getvalue().decode('ascii', 'replace').split()
    unexpected_modules = " ".join(sorted(x for x in output
        if self._module_re.match(x) is not None
        and x not in self._baseline_imports))
    self.assertEqual("", unexpected_modules)
Example 7: _start
def _start(self):
    pr, pw = os.pipe()
    self._files = {}
    self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
    self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
    for k, f in self._files.items():
        fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
            fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
    self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
        PollConstants.POLLIN, self._output_handler)
    self._registered = True

    threading_mod = threading
    if self._force_dummy:
        threading_mod = dummy_threading
    self._thread = threading_mod.Thread(target=self._run_lock)
    self._thread.start()
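Here the lock is taken in a thread, which reports back through the pipe so the poll-based scheduler can react. A minimal sketch of that thread-to-pipe handoff using only the standard library (a hypothetical worker, not the portage class above):

import os
import threading

pr, pw = os.pipe()

def worker():
    # Pretend to acquire a lock, then signal the main thread.
    os.write(pw, b"locked\n")
    os.close(pw)

t = threading.Thread(target=worker)
t.start()

# This read blocks until the worker writes its status.
print(os.read(pr, 64))
t.join()
os.close(pr)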
Example 8: _testPipeReader
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    if self._use_pty:
        got_pty, master_fd, slave_fd = _create_pty_or_pipe()
        if not got_pty:
            os.close(slave_fd)
            os.close(master_fd)
            skip_reason = "pty not acquired"
            self.portage_skip = skip_reason
            self.fail(skip_reason)
            return
    else:
        master_fd, slave_fd = os.pipe()

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb', 0)

    task_scheduler = TaskScheduler(max_jobs=2)
    producer = SpawnProcess(
        args=["bash", "-c", self._echo_cmd % test_string],
        env=os.environ, fd_pipes={1: slave_fd},
        scheduler=task_scheduler.sched_iface)
    task_scheduler.add(producer)
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=task_scheduler.sched_iface, _use_array=self._use_array)
    task_scheduler.add(consumer)

    # This will ensure that both tasks have exited, which
    # is necessary to avoid "ResourceWarning: unclosed file"
    # warnings since Python 3.2 (and also ensures that we
    # don't leave any zombie child processes).
    task_scheduler.run()
    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
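The docstring above mentions a poll loop, which PipeReader implements internally. The underlying mechanism can be sketched directly with select.poll() (POSIX, standard library; a simplified stand-in for PipeReader, not its actual code):

import os
import select
import subprocess

pr, pw = os.pipe()
proc = subprocess.Popen(["echo", "-n", "blah blah blah"], stdout=pw)
os.close(pw)  # the child holds its own copy of the write end

poller = select.poll()
poller.register(pr, select.POLLIN)

received = b""
while True:
    poller.poll()  # block until the pipe is readable or closed
    chunk = os.read(pr, 4096)
    if not chunk:  # EOF: every write end has been closed
        break
    received += chunk

os.close(pr)
proc.wait()
print(received.decode("ascii"))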
Example 9: _testPipeReader
def _testPipeReader(self, test_string):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    if self._use_pty:
        got_pty, master_fd, slave_fd = _create_pty_or_pipe()
        if not got_pty:
            os.close(slave_fd)
            os.close(master_fd)
            skip_reason = "pty not acquired"
            self.portage_skip = skip_reason
            self.fail(skip_reason)
            return
    else:
        master_fd, slave_fd = os.pipe()

    # WARNING: It is very important to use unbuffered mode here,
    # in order to avoid issue 5380 with python3.
    master_file = os.fdopen(master_fd, 'rb', 0)
    scheduler = global_event_loop()

    consumer = PipeReader(
        input_files={"producer": master_file},
        _use_array=self._use_array,
        scheduler=scheduler)
    producer = PopenProcess(
        pipe_reader=consumer,
        proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
            stdout=slave_fd),
        scheduler=scheduler)
    producer.start()
    os.close(slave_fd)
    producer.wait()

    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    return consumer.getvalue().decode('ascii', 'replace')
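This variant wires a plain subprocess.Popen into portage's event loop via PopenProcess. The modern standard-library analogue of that producer/consumer shape is asyncio's subprocess support (an illustration of the idea, not portage code):

import asyncio

async def read_child_output():
    # Spawn a child whose stdout is a pipe drained by the event
    # loop, much like PopenProcess + PipeReader above.
    proc = await asyncio.create_subprocess_exec(
        "echo", "-n", "blah blah blah",
        stdout=asyncio.subprocess.PIPE)
    data = await proc.stdout.read()  # read until EOF
    await proc.wait()
    return data

print(asyncio.run(read_child_output()).decode("ascii"))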
Example 10: testPipeReader
def testPipeReader(self):
    """
    Use a poll loop to read data from a pipe and assert that
    the data written to the pipe is identical to the data
    read from the pipe.
    """
    test_string = 2 * "blah blah blah\n"

    scheduler = PollScheduler().sched_iface
    master_fd, slave_fd = os.pipe()
    master_file = os.fdopen(master_fd, 'rb', 0)
    slave_file = os.fdopen(slave_fd, 'wb')
    producer = SpawnProcess(
        args=["bash", "-c", "echo -n '%s'" % test_string],
        env=os.environ, fd_pipes={1: slave_fd},
        scheduler=scheduler)
    producer.start()
    slave_file.close()

    consumer = PipeReader(
        input_files={"producer": master_file},
        scheduler=scheduler)
    consumer.start()

    # This will ensure that both tasks have exited, which
    # is necessary to avoid "ResourceWarning: unclosed file"
    # warnings since Python 3.2 (and also ensures that we
    # don't leave any zombie child processes).
    scheduler.schedule()
    self.assertEqual(producer.returncode, os.EX_OK)
    self.assertEqual(consumer.returncode, os.EX_OK)

    output = consumer.getvalue().decode('ascii', 'replace')
    self.assertEqual(test_string, output)
Example 11: testDoebuild
def testDoebuild(self):
    """
    Invoke portage.doebuild() with the fd_pipes parameter, and
    check that the expected output appears in the pipe. This
    functionality is not used by portage internally, but it is
    supported for API consumers (see bug #475812).
    """
    output_fd = 200
    ebuild_body = ['S=${WORKDIR}']
    for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend',
            'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure',
            'src_compile', 'src_test', 'src_install'):
        ebuild_body.append(('%s() { echo ${EBUILD_PHASE}'
            ' 1>&%s; }') % (phase_func, output_fd))
    ebuild_body.append('')
    ebuild_body = '\n'.join(ebuild_body)

    ebuilds = {
        'app-misct/foo-1': {
            'EAPI': '5',
            "MISC_CONTENT": ebuild_body,
        }
    }

    # Override things that may be unavailable, or may have portability
    # issues when running tests in exotic environments.
    #   prepstrip - bug #447810 (bash read builtin EINTR problem)
    true_symlinks = ("find", "prepstrip", "sed", "scanelf")
    true_binary = portage.process.find_binary("true")
    self.assertEqual(true_binary is None, False,
        "true command not found")

    dev_null = open(os.devnull, 'wb')
    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        QueryCommand._db = playground.trees
        root_config = playground.trees[playground.eroot]['root_config']
        portdb = root_config.trees["porttree"].dbapi
        settings = portage.config(clone=playground.settings)
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
            settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

        settings.features.add("noauto")
        settings.features.add("test")
        settings['PORTAGE_PYTHON'] = portage._python_interpreter
        settings['PORTAGE_QUIET'] = "1"
        settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "")

        fake_bin = os.path.join(settings["EPREFIX"], "bin")
        portage.util.ensure_dirs(fake_bin)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))

        settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
        settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

        cpv = 'app-misct/foo-1'
        metadata = dict(zip(Package.metadata_keys,
            portdb.aux_get(cpv, Package.metadata_keys)))
        pkg = Package(built=False, cpv=cpv, installed=False,
            metadata=metadata, root_config=root_config,
            type_name='ebuild')
        settings.setcpv(pkg)
        ebuildpath = portdb.findname(cpv)
        self.assertNotEqual(ebuildpath, None)

        for phase in ('info', 'nofetch',
                'pretend', 'setup', 'unpack', 'prepare', 'configure',
                'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):

            pr, pw = os.pipe()
            producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
                doebuild_kwargs={"settings": settings,
                    "mydbapi": portdb, "tree": "porttree",
                    "vartree": root_config.trees["vartree"],
                    "fd_pipes": {
                        1: dev_null.fileno(),
                        2: dev_null.fileno(),
                        output_fd: pw,
                    },
                    "prev_mtimes": {}})
            consumer = PipeReader(
                input_files={"producer": pr})
            task_scheduler = TaskScheduler(iter([producer, consumer]),
                max_jobs=2)
            try:
                task_scheduler.start()
            finally:
                # PipeReader closes pr
                os.close(pw)
# ... (the rest of this example has been omitted) ...
Example 12: spawn
def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
        uid=None, gid=None, groups=None, umask=None, logfile=None,
        path_lookup=True, pre_exec=None):
    """
    Spawns a given command.

    @param mycommand: the command to execute
    @type mycommand: String or List (Popen style list)
    @param env: A dict of Key=Value pairs for env variables
    @type env: Dictionary
    @param opt_name: an optional name for the spawned process (defaults to the binary name)
    @type opt_name: String
    @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
    @type fd_pipes: Dictionary
    @param returnpid: Return the Process IDs for a successful spawn.
        NOTE: This requires the caller to clean up all the PIDs, otherwise spawn will clean them.
    @type returnpid: Boolean
    @param uid: User ID to spawn as; useful for dropping privileges
    @type uid: Integer
    @param gid: Group ID to spawn as; useful for dropping privileges
    @type gid: Integer
    @param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
    @type groups: List
    @param umask: An integer representing the umask for the process (see man chmod for umask details)
    @type umask: Integer
    @param logfile: name of a file to use for logging purposes
    @type logfile: String
    @param path_lookup: If the binary is not fully specified then look for it in PATH
    @type path_lookup: Boolean
    @param pre_exec: A function to be called with no arguments just prior to the exec call.
    @type pre_exec: callable

    logfile requires stdout and stderr to be assigned to this process (i.e. not pointed
    somewhere else.)
    """

    # mycommand is either a str or a list
    if isinstance(mycommand, basestring):
        mycommand = mycommand.split()

    if sys.hexversion < 0x3000000:
        # Avoid a potential UnicodeEncodeError from os.execve().
        env_bytes = {}
        for k, v in env.items():
            env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
                _unicode_encode(v, encoding=_encodings['content'])
        env = env_bytes
        del env_bytes

    # If an absolute path to an executable file isn't given
    # search for it unless we've been told not to.
    binary = mycommand[0]
    if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
            (not os.path.isabs(binary) or not os.path.isfile(binary)
            or not os.access(binary, os.X_OK)):
        binary = path_lookup and find_binary(binary) or None

    if not binary:
        raise CommandNotFound(mycommand[0])

    # If we haven't been told what file descriptors to use
    # default to propagating our stdin, stdout and stderr.
    if fd_pipes is None:
        fd_pipes = {
            0: sys.__stdin__.fileno(),
            1: sys.__stdout__.fileno(),
            2: sys.__stderr__.fileno(),
        }

    # mypids will hold the pids of all processes created.
    mypids = []

    if logfile:
        # Using a log file requires that stdout and stderr
        # are assigned to the process we're running.
        if 1 not in fd_pipes or 2 not in fd_pipes:
            raise ValueError(fd_pipes)

        # Create a pipe
        (pr, pw) = os.pipe()

        # Create a tee process, giving it our stdout and stderr
        # as well as the read end of the pipe.
        mypids.extend(spawn(('tee', '-i', '-a', logfile),
            returnpid=True, fd_pipes={0: pr,
            1: fd_pipes[1], 2: fd_pipes[2]}))

        # We don't need the read end of the pipe, so close it.
        os.close(pr)

        # Assign the write end of the pipe to our stdout and stderr.
        fd_pipes[1] = pw
        fd_pipes[2] = pw

    pid = os.fork()

    if pid == 0:
        try:
            _exec(binary, mycommand, opt_name, fd_pipes,
                env, gid, groups, uid, umask, pre_exec)
# ... (the rest of this example has been omitted) ...
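Going by the signature and docstring above alone, a call might look like the sketch below; treat it as illustrative, since the exact import path and cleanup semantics are portage-specific (per the docstring, returnpid=True makes the caller responsible for reaping the PIDs).

import os
from portage.process import spawn  # assumed import path for this sketch

# Capture a child's stdout through a pipe, using the parameters
# documented above.
pr, pw = os.pipe()
pids = spawn(["echo", "hello from spawn"],
    env=dict(os.environ),
    fd_pipes={0: 0, 1: pw, 2: 2},
    returnpid=True)
os.close(pw)  # keep only the read end in this process
print(os.read(pr, 4096))
os.close(pr)

# The caller must clean up the returned PIDs.
for pid in pids:
    os.waitpid(pid, 0)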
Example 13: _start
def _start(self):
    settings = self.settings
    settings.setcpv(self.cpv)
    ebuild_path = self.ebuild_path

    eapi = None
    if 'parse-eapi-glep-55' in settings.features:
        pf, eapi = portage._split_ebuild_name_glep55(
            os.path.basename(ebuild_path))
    if eapi is None and \
            'parse-eapi-ebuild-head' in settings.features:
        eapi = portage._parse_eapi_ebuild_head(
            codecs.open(_unicode_encode(ebuild_path,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace'))

    if eapi is not None:
        if not portage.eapi_is_supported(eapi):
            self.metadata_callback(self.cpv, self.ebuild_path,
                self.repo_path, {'EAPI': eapi}, self.ebuild_mtime)
            self.returncode = os.EX_OK
            self.wait()
            return
        settings.configdict['pkg']['EAPI'] = eapi

    debug = settings.get("PORTAGE_DEBUG") == "1"
    master_fd = None
    slave_fd = None
    fd_pipes = None
    if self.fd_pipes is not None:
        fd_pipes = self.fd_pipes.copy()
    else:
        fd_pipes = {}
    fd_pipes.setdefault(0, sys.stdin.fileno())
    fd_pipes.setdefault(1, sys.stdout.fileno())
    fd_pipes.setdefault(2, sys.stderr.fileno())

    # flush any pending output
    for fd in fd_pipes.values():
        if fd == sys.stdout.fileno():
            sys.stdout.flush()
        if fd == sys.stderr.fileno():
            sys.stderr.flush()

    fd_pipes_orig = fd_pipes.copy()
    self._files = self._files_dict()
    files = self._files

    master_fd, slave_fd = os.pipe()
    fcntl.fcntl(master_fd, fcntl.F_SETFL,
        fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    fd_pipes[self._metadata_fd] = slave_fd

    self._raw_metadata = []
    files.ebuild = os.fdopen(master_fd, 'rb')
    self._reg_id = self.scheduler.register(files.ebuild.fileno(),
        self._registered_events, self._output_handler)
    self._registered = True

    retval = portage.doebuild(ebuild_path, "depend",
        settings["ROOT"], settings, debug,
        mydbapi=self.portdb, tree="porttree",
        fd_pipes=fd_pipes, returnpid=True)
    os.close(slave_fd)

    if isinstance(retval, int):
        # doebuild failed before spawning
        self._unregister()
        self.returncode = retval
        self.wait()
        return

    self.pid = retval[0]
    portage.process.spawned_pids.remove(self.pid)
Example 14: _create_pipe
def _create_pipe(self):
    return os.pipe()
Example 15: make_pipes
def make_pipes():
    return os.pipe(), None