本文整理汇总了Python中tempfile.TemporaryFile.fileno方法的典型用法代码示例。如果您正苦于以下问题:Python TemporaryFile.fileno方法的具体用法?Python TemporaryFile.fileno怎么用?Python TemporaryFile.fileno使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tempfile.TemporaryFile
的用法示例。
在下文中一共展示了TemporaryFile.fileno方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class MockSys:
    """Minimal stand-in for the ``sys`` module whose standard streams are
    backed by temporary files, exposing their OS-level descriptors."""

    def __init__(self):
        # Writable stdin replacement; stdout/stderr opened for reading.
        self.stdin = TemporaryFile("w")
        self.stdout = TemporaryFile("r")
        self.stderr = TemporaryFile("r")
        self.__stderr__ = self.stderr
        # Real file descriptors for each replaced stream, in stdio order.
        self.stdio_fds = [stream.fileno()
                          for stream in (self.stdin, self.stdout, self.stderr)]
示例2: apply
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def apply(self, req, proj):
    """Run this prototype on a new project.

    Each setup step runs in a forked child so its stdout/stderr can be
    captured; the step's return value and captured output are recorded in
    the ``tracforge_project_log`` table.

    NOTE: If you pass in a project that isn't new, this could explode. Don't do that.
    """
    from api import TracForgeAdminSystem
    steps = TracForgeAdminSystem(self.env).get_project_setup_participants()
    # Clear log entries from any previous run for this project.
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    cursor.execute("DELETE FROM tracforge_project_log WHERE project=%s", (proj.name,))
    db.commit()
    for step in self:
        # A step is either {'action': ..., 'args': ...} or an (action, args) pair.
        action = args = None
        if isinstance(step, dict):
            action = step["action"]
            args = step["args"]
        else:
            action, args = step
        pid = os.fork()
        if not pid:
            # --- child process: run one step with stdout/stderr captured ---
            # o_fd, o_file = mkstemp('tracforge-step', text=True)
            # e_fd, e_file = mkstemp('tracforge-step', text=True)
            # bufsize=0 keeps the capture files unbuffered (Python 2 API).
            o_file = TemporaryFile(prefix="tracforge-step", bufsize=0)
            e_file = TemporaryFile(prefix="tracforge-step", bufsize=0)
            sys.stdout = o_file
            sys.stderr = e_file
            # Also redirect the OS-level descriptors so output written by
            # subprocesses (not just Python-level prints) is captured.
            os.dup2(o_file.fileno(), 1)
            os.dup2(e_file.fileno(), 2)
            rv = steps[action]["provider"].execute_setup_action(req, proj, action, args)
            self.env.log.debug("TracForge: %s() => %r", action, rv)
            # Rewind and collect everything the step wrote.
            o_file.seek(0, 0)
            o_data = o_file.read()
            o_file.close()
            e_file.seek(0, 0)
            e_data = e_file.read()
            e_file.close()
            # Fresh DB connection in the child; presumably the parent's
            # connection cannot be shared safely across fork() — confirm.
            db = self.env.get_db_cnx()
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO tracforge_project_log (project, action, args, return, stdout, stderr) VALUES (%s, %s, %s, %s, %s, %s)",
                (proj.name, action, args, int(rv), o_data, e_data),
            )
            db.commit()
            db.close()
            # os._exit skips atexit/cleanup handlers inherited from the parent.
            os._exit(0)
        # --- parent: wait for the child before starting the next step ---
        os.waitpid(pid, 0)
示例3: T
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class T(threading.Thread):
    """Background thread that tails ``self._fd`` and echoes newly appended
    bytes to stdout, until a shutdown handshake arrives on ``self._comm_fd``.

    Python 2 code (uses the ``print`` statement).
    """

    # Handshake token: stop() writes it once, run() acknowledges by
    # appending it a second time.
    _shutdown_msg = "shutdown"

    def __init__(self):
        threading.Thread.__init__(self)
        self._fd = TemporaryFile()       # data file: writers append, run() echoes
        self._comm_fd = TemporaryFile()  # control channel for the shutdown handshake
        self._run = False

    def get_file_handle(self):
        # Hand out the data file so external writers can append to it.
        return self._fd

    def run(self):
        self._run = True
        while self._run:
            t1 = time.time()
            # NOTE(review): select() on regular files reports them as always
            # readable on most platforms, so this likely spins — confirm.
            r, _, _ = select.select([self._fd.fileno(), self._comm_fd.fileno()], [], [])
            print "select time:", time.time()-t1
            for elem in r:
                if elem == self._fd.fileno():
                    # Compare current position with end-of-file to detect
                    # newly appended bytes.
                    s = self._fd.tell()
                    self._fd.seek(0, os.SEEK_END)  # to the end
                    e = self._fd.tell()
                    if s == e:  # nothing new
                        continue
                    # Seek back and read exactly the new bytes.
                    self._fd.seek(-(e-s), os.SEEK_END)
                    diff = self._fd.read(e-s)
                    if True:
                        sys.stdout.write(diff)
                        sys.stdout.flush()
                # exit
                elif elem == self._comm_fd.fileno():
                    self._comm_fd.seek(0, os.SEEK_END)
                    # Exactly one token present => shutdown requested;
                    # acknowledge by writing the second copy.
                    if self._comm_fd.tell() == len(T._shutdown_msg):
                        self._run = False
                        self._comm_fd.write(T._shutdown_msg)
                        self._comm_fd.flush()

    def stop(self):
        # Request shutdown (idempotent), then busy-wait until run() has
        # appended its acknowledgement token.
        self._comm_fd.seek(0, os.SEEK_END)
        if self._comm_fd.tell() != 0:
            return  # shutdown already requested
        self._comm_fd.write(T._shutdown_msg)
        self._comm_fd.flush()
        while self._comm_fd.tell() != 2*len(T._shutdown_msg):
            self._comm_fd.seek(0, os.SEEK_END)

    def __del__(self, ):
        self._fd.close()
示例4: backup
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def backup(self):
    """
    Create backup
    """
    if self.dry_run:
        return
    directory = self.config["tar"]["directory"]
    # Only archive an existing directory.
    if not (os.path.exists(directory) and os.path.isdir(directory)):
        raise BackupError("{0} is not a directory!".format(directory))
    out_name = "{0}.tar".format(directory.lstrip("/").replace("/", "_"))
    outfile = os.path.join(self.target_directory, out_name)
    args = ["tar", "c", directory]
    errlog = TemporaryFile()  # capture tar's stderr for logging
    stream = open_stream(outfile, "w", **self.config["compression"])
    LOG.info("Executing: %s", list2cmdline(args))
    tar_proc = Popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
    status = tar_proc.wait()
    try:
        # Log every line tar printed on stderr, tagged with the command.
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", list2cmdline(args), tar_proc.pid, line.rstrip())
    finally:
        errlog.close()
    if status != 0:
        raise BackupError("tar failed (status={0})".format(status))
示例5: convert_hwp5file_into_odtpkg
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def convert_hwp5file_into_odtpkg(hwp5file):
    """Convert an HWP v5 document into an ODT package storage object.

    :param hwp5file: the source HWP document to convert
    :return: a storage object wrapping the converted ODT package bytes
    """
    from tempfile import TemporaryFile
    tmpfile = TemporaryFile()
    import os
    # Duplicate the descriptor so the zip bytes can still be read back
    # after the ZipFile/package (and presumably tmpfile with it) is
    # closed below — confirm odtpkg.close() closes the underlying file.
    tmpfile2 = os.fdopen( os.dup(tmpfile.fileno()), 'r')
    from zipfile import ZipFile
    zf = ZipFile(tmpfile, 'w')
    from hwp5.hwp5odt import ODTPackage
    odtpkg = ODTPackage(zf)
    try:
        from hwp5.hwp5odt import Converter
        import hwp5.plat
        # Prefer the LibreOffice XSLT transformer when available.
        if haveXSLTTransformer():
            xslt = xslt_with_libreoffice
        else:
            # we use default xslt
            xslt = hwp5.plat.get_xslt()
        # convert without RelaxNG validation
        convert = Converter(xslt)
        # Embed images: see #32 - https://github.com/mete0r/pyhwp/issues/32
        convert(hwp5file, odtpkg, embedimage=True)
    finally:
        odtpkg.close()
    # Rewind the duplicated handle and expose the zip bytes as a stream.
    tmpfile2.seek(0)
    odtpkg_stream = InputStreamFromFileLike(tmpfile2)
    odtpkg_storage = StorageFromInputStream(odtpkg_stream)
    return odtpkg_storage
示例6: backup
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def backup(self):
    """Archive the configured directory with tar into the compressed
    stream returned by ``self._open_stream``, logging tar's stderr and
    raising ``BackupError`` on any failure."""
    if self.dry_run:
        return
    target = self.config['tar']['directory']
    if not os.path.exists(target) or not os.path.isdir(target):
        raise BackupError('{0} is not a directory!'.format(target))
    out_name = "{0}.tar".format(target.lstrip('/').replace('/', '_'))
    outfile = os.path.join(self.target_directory, out_name)
    args = ['tar', 'c', target]
    errlog = TemporaryFile()  # holds tar's stderr until we can log it
    stream = self._open_stream(outfile, 'w')
    LOG.info("Executing: %s", list2cmdline(args))
    process = Popen(args,
                    stdout=stream.fileno(),
                    stderr=errlog.fileno(),
                    close_fds=True)
    status = process.wait()
    try:
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", list2cmdline(args), process.pid, line.rstrip())
    finally:
        errlog.close()
    if status != 0:
        raise BackupError('tar failed (status={0})'.format(status))
示例7: LeptonicaErrorTrap
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class LeptonicaErrorTrap(object):
    """Context manager to trap errors reported by Leptonica.

    Leptonica's error return codes are unreliable to the point of being
    almost useless. It does, however, write errors to stderr provided that is
    not disabled at its compile time. Fortunately this is done using error
    macros so it is very self-consistent.

    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages. As a side benefit, debug messages
    from Leptonica are also suppressed.
    """

    def __enter__(self):
        from io import UnsupportedOperation
        self.tmpfile = TemporaryFile()
        # Save the old stderr, and redirect stderr to temporary file
        sys.stderr.flush()
        try:
            self.copy_of_stderr = os.dup(sys.stderr.fileno())
            # inheritable=False: don't leak the redirected fd to children.
            os.dup2(self.tmpfile.fileno(), sys.stderr.fileno(),
                    inheritable=False)
        except UnsupportedOperation:
            # sys.stderr has no real descriptor (e.g. replaced by a test
            # harness); copy_of_stderr=None marks "no redirection done".
            self.copy_of_stderr = None
        return

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        sys.stderr.flush()
        if self.copy_of_stderr is not None:
            os.dup2(self.copy_of_stderr, sys.stderr.fileno())
            os.close(self.copy_of_stderr)
        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')
        assert self.tmpfile.closed
        assert not sys.stderr.closed
        # If there are Python errors, let them bubble up
        if exc_type:
            logger.warning(leptonica_output)
            return False
        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise FileNotFoundError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            raise LeptonicaError(leptonica_output)
        return False
示例8: LeptonicaErrorTrap
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class LeptonicaErrorTrap(object):
    """Context manager to trap errors reported by Leptonica.

    Leptonica's error return codes are unreliable to the point of being
    almost useless. It does, however, write errors to stderr provided that is
    not disabled at its compile time. Fortunately this is done using error
    macros so it is very self-consistent.

    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages. As a side benefit, debug messages
    from Leptonica are also suppressed.
    """

    def __init__(self, verbose=False):
        # verbose: when True, re-emit all captured (non-empty) output.
        self.verbose = verbose

    def __enter__(self):
        self.tmpfile = TemporaryFile()
        # Save the old stderr, and redirect stderr to temporary file
        self.old_stderr_fileno = os.dup(sys.stderr.fileno())
        os.dup2(self.tmpfile.fileno(), sys.stderr.fileno())
        return

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        os.dup2(self.old_stderr_fileno, sys.stderr.fileno())
        os.close(self.old_stderr_fileno)
        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')
        # If there are Python errors, let them bubble up
        if exc_type:
            stderr(leptonica_output)
            return False
        if self.verbose and leptonica_output.strip() != '':
            stderr(leptonica_output)
        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise LeptonicaIOError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            if 'not enough conf to get orientation' in leptonica_output:
                # Deliberately ignored: not treated as a failure.
                pass
            else:
                raise LeptonicaError(leptonica_output)
        return False
示例9: StackTraceBuffer
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class StackTraceBuffer(object):
    """Buffer that exposes a real OS file descriptor, lazily backed by a
    ``TemporaryFile`` created on the first ``fileno()`` call."""

    def __init__(self):
        # Backing file; stays None until fileno() is first requested.
        self.f = None

    def fileno(self):
        # Allocate the temporary file on demand, then reuse it.
        if self.f is None:
            self.f = TemporaryFile()
        return self.f.fileno()

    def getvalue(self):
        # Nothing can have been written if the file was never created.
        if self.f is None:
            return ''
        self.f.seek(0)
        return self.f.read()
示例10: run
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def run(self, databases, stream, additional_options=None):
    """Run mysqldump with the options configured on this instance.

    :param databases: database names to dump, or the ALL_DATABASES marker
    :param stream: writable object with a fileno(); receives the dump
    :param additional_options: optional extra command-line arguments
    :raises MySQLDumpError: on invalid arguments or non-zero exit status
    """
    if not hasattr(stream, "fileno"):
        raise MySQLDumpError("Invalid output stream")
    if not databases:
        raise MySQLDumpError("No databases specified to backup")

    # Assemble the mysqldump command line.
    args = [self.cmd_path]
    if self.defaults_file:
        template = ("--defaults-extra-file=%s" if self.extra_defaults
                    else "--defaults-file=%s")
        args.append(template % self.defaults_file)
    args.extend(str(opt) for opt in self.options)
    if additional_options:
        args.extend(additional_options)
    if databases is ALL_DATABASES:
        args.append("--all-databases")
    else:
        if len(databases) > 1:
            args.append("--databases")
        args.extend(databases)

    # In mock mode we only log the command and use the mocked Popen.
    if self.mock_env:
        LOG.info("Dry Run: %s", subprocess.list2cmdline(args))
        popen = self.mock_env.mocked_popen
    else:
        LOG.info("Executing: %s", subprocess.list2cmdline(args))
        popen = subprocess.Popen

    errlog = TemporaryFile()  # capture mysqldump's stderr for logging
    process = popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
    status = process.wait()
    try:
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", self.cmd_path, process.pid, line.rstrip())
    finally:
        errlog.close()
    if status != 0:
        raise MySQLDumpError("mysqldump exited with non-zero status %d" % process.returncode)
示例11: APRFile
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
class APRFile(object):
    """Wrap a Python file-like object as an APR File"""

    def __init__(self, pyfile):
        # pyfile: target file-like object; may or may not have a fileno().
        self.pyfile = pyfile
        self.pool = Pool()
        # ctypes convention: _as_parameter_ lets this instance be passed
        # directly to C functions expecting an apr_file_t*.
        self._as_parameter_ = POINTER(apr_file_t)()
        self.tempfile = None
        if hasattr(pyfile, "fileno"):
            # Looks like this is a real file. We can just write
            # directly to said file
            osfile = apr_os_file_t(get_osfhandle(pyfile.fileno()))
        else:
            # Looks like this is a StringIO buffer or a fake file.
            # Write to a temporary file and copy the output to the
            # buffer when we are closed or flushed
            self.tempfile = TemporaryFile()
            osfile = apr_os_file_t(get_osfhandle(self.tempfile.fileno()))
        apr_os_file_put(byref(self._as_parameter_), byref(osfile),
                        APR_CREATE | APR_WRITE | APR_BINARY, self.pool)

    def flush(self):
        """Flush output to the underlying Python object"""
        if self.tempfile:
            # Copy staged bytes into the target, then reset the staging file.
            self.tempfile.seek(0)
            copyfileobj(self.tempfile, self.pyfile)
            self.tempfile.truncate(0)

    def close(self):
        """Close the APR file wrapper, leaving the underlying Python object
        untouched"""
        self.flush()
        if self.tempfile:
            self.tempfile.close()
            self.tempfile = None
        # Destroying the pool releases the APR-side resources; pool=None
        # also marks this wrapper as closed for __del__.
        self.pool.destroy()
        self.pool = None

    def __del__(self, ):
        # Guard on self.pool so close() never runs twice.
        if self.pool:
            self.close()
示例12: _open_stream
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def _open_stream(self, path, mode, method=None):
    """Open a stream through the holland compression api, relative to
    this instance's target directory.

    :param path: path of the file to open
    :param mode: open mode (e.g. 'r' or 'w')
    :param method: optional compression method; falls back to the
        configured ``compression.method`` when not given
    """
    compression = self.config['compression']
    return open_stream(path,
                       mode,
                       method or compression['method'],
                       compression['level'],
                       extra_args=compression['options'])
def backup(self):
    """Create a tar archive of the configured directory.

    Streams ``tar c <directory>`` into the compressed stream returned by
    ``self._open_stream`` and logs anything tar printed on stderr.

    :raises BackupError: if the configured path is not a directory, or if
        tar exits with a non-zero status.
    """
    if self.dry_run:
        return
    directory = self.config['tar']['directory']
    # BUG FIX: the original split this condition over two lines with no
    # line continuation, which is a SyntaxError; parenthesize instead.
    if not (os.path.exists(directory) and os.path.isdir(directory)):
        raise BackupError('{0} is not a directory!'.format(directory))
    out_name = "{0}.tar".format(directory.lstrip('/').replace('/', '_'))
    outfile = os.path.join(self.target_directory, out_name)
    args = ['tar', 'c', directory]
    errlog = TemporaryFile()  # capture tar's stderr for logging
    stream = self._open_stream(outfile, 'w')
    LOG.info("Executing: %s", list2cmdline(args))
    pid = Popen(
        args,
        stdout=stream.fileno(),
        stderr=errlog.fileno(),
        close_fds=True)
    status = pid.wait()
    try:
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
    finally:
        errlog.close()
    # Consistency fix: the sibling backup implementations raise on a
    # non-zero tar exit status; this variant silently ignored failure.
    if status != 0:
        raise BackupError('tar failed (status={0})'.format(status))
示例13: __init__
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def __init__(self, stream, length, _shared=None):
    """
    :param stream: THE STREAM WE WILL GET THE BYTES FROM
    :param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
    :param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
    :return:
    """
    self.position = 0
    if not _shared:
        # Back the buffer with a temp file mapped into memory.
        # BUG FIX: the TemporaryFile used to be created unconditionally,
        # leaking an open file descriptor whenever _shared was supplied.
        file_ = TemporaryFile()
        # NOTE(review): mmap() of a fresh zero-length file with a nonzero
        # length auto-extends the file only on Windows — confirm platform.
        self.shared = Dict(
            length=length,
            locker=Lock(),
            stream=stream,
            done_read=0,
            file=file_,
            buffer=mmap(file_.fileno(), length)
        )
    else:
        # Reuse the buffer owned by a sibling instance; count the ref.
        self.shared = _shared
        self.shared.ref_count += 1
示例14: run
# 需要导入模块: from tempfile import TemporaryFile [as 别名]
# 或者: from tempfile.TemporaryFile import fileno [as 别名]
def run(self, params):
if self.datastore.get_one('users', ('username', '=', params.get('user'))) is None:
raise TaskException(
errno.ENOENT, 'User {0} does not exist'.format(params.get('user'))
)
self.message = 'Starting Rsync Task'
self.set_progress(0)
with open(os.path.join(params['path'], '.lock'), 'wb+') as lockfile:
# Lets try and get a lock on this path for the rsync task
# but do not freak out if you do not get it
try:
flock(lockfile, LOCK_EX | LOCK_NB)
except IOError:
logger.warning('Rsync Task could not get a lock on {0}'.format(params['path']))
# Execute Rsync Task here
line = '/usr/local/bin/rsync --info=progress2 -h'
rsync_properties = params.get('rsync_properties')
if rsync_properties:
if rsync_properties.get('recursive'):
line += ' -r'
if rsync_properties.get('times'):
line += ' -t'
if rsync_properties.get('compress'):
line += ' -z'
if rsync_properties.get('archive'):
line += ' -a'
if rsync_properties.get('preserve_permissions'):
line += ' -p'
if rsync_properties.get('preserve_attributes'):
line += ' -X'
if rsync_properties.get('delete'):
line += ' --delete-delay'
if rsync_properties.get('delay_updates'):
line += ' --delay-updates'
if rsync_properties.get('extra'):
line += ' {0}'.format(rsync_properties.get('extra'))
remote_host = params.get('remote_host')
remote_address = ''
if '@' in remote_host:
remote_address = remote_host
else:
remote_user = params.get('remote_user', params.get('user'))
remote_address = '"{0}"@{1}'.format(remote_user, remote_host)
if params.get('rsync_mode') == 'MODULE':
if params.get('rsync_direction') == 'PUSH':
line += ' "{0}" {1}::"{2}"'.format(
params.get('path'),
remote_address,
params.get('remote_module'),
)
else:
line += ' {0}::"{1}" "{2}"'.format(
remote_address,
params.get('remote_module'),
params.get('rsync_path'),
)
else:
line += ' -e "ssh -p {0} -o BatchMode=yes -o StrictHostKeyChecking=yes"'.format(
params.get('remote_ssh_port', 22)
)
if params.get('rsync_direction') == 'PUSH':
line += ' "{0}" {1}:\\""{2}"\\"'.format(
params.get('path'),
remote_address,
params.get('remote_path'),
)
else:
line += ' {0}:\\""{1}"\\" "{2}"'.format(
remote_address,
params.get('remote_path'),
params.get('path'),
)
if params.get('quiet'):
line += ' > /dev/null 2>&1'
# Starting rsync subprocess
logger.debug('Rsync Copy Task Command: {0}'.format(line))
# It would be nice to get the progess but not at the cost of
# killing this task!
# Note this TemporaryFile hack for the subprocess stdout is needed
# because setting Popen's `stdout=subprocess.PIPE` does not allow
# that sstdout to be seeked on. subprocess.PIPE only allows for
# readline() and such read methods. stdout.readline() does not
# allow for us to catch rsync's in-place progress updates which
# are done with the '\r' character. It is also auto garbage collected.
proc_stdout = TemporaryFile(mode='w+b', buffering=0)
try:
rsync_proc = subprocess.Popen(
line,
stdout=proc_stdout.fileno(),
stderr=subprocess.PIPE,
shell=True,
bufsize=0,
preexec_fn=demote(params.get('user'))
#.........这里部分代码省略.........