本文整理汇总了Python中tempfile._get_candidate_names函数的典型用法代码示例。如果您正苦于以下问题:Python _get_candidate_names函数的具体用法?Python _get_candidate_names怎么用?Python _get_candidate_names使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_get_candidate_names函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: deep_matches_vids
def deep_matches_vids(exec_name, vid_frames_path, extra_params=""):
    """Run a matching binary on a video assembled from frame images.

    The frames are first re-encoded into a temporary lossless AVI with
    ffmpeg, the matcher binary is then invoked on that video with binary
    output (-b), and its output file is parsed.  Both temporary files are
    removed afterwards on a best-effort basis.
    """
    matches = []
    # Random names (relative to the current directory) for the
    # intermediate video and the matcher's output file.
    tmp_vid_fname = next(tempfile._get_candidate_names()) + ".avi"
    tmp_fname = next(tempfile._get_candidate_names())
    ffmpeg_cmd = "ffmpeg -i %s -c:v huffyuv -pix_fmt rgb24 %s" % (vid_frames_path, tmp_vid_fname)
    cmd = '%s -i "%s" %s -b -out %s' % (exec_name, tmp_vid_fname, extra_params, tmp_fname)
    try:
        # Encode the frames into the temporary video, discarding output.
        encoder = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, shell=True)
        encoder.communicate()
        encoder.wait()
        started = datetime.now()
        os.system(cmd)
        elapsed = datetime.now() - started  # timing kept for debugging
        matches = read_output_vid_matches_binary(tmp_fname)
    finally:
        # Delete both temporary files if they exist.
        try:
            os.remove(tmp_vid_fname)
            os.remove(tmp_fname)
        except OSError:
            pass
    return matches
示例2: setUp
def setUp(self):
    """Ensure the fixture directory exists and pick a fresh temp-file path in it."""
    if not os.path.exists(self.fixture_dir):
        os.mkdir(self.fixture_dir)
    # Keep drawing candidate names until one names a path that does not
    # exist yet.
    while True:
        candidate = os.path.join(
            self.fixture_dir, next(tempfile._get_candidate_names()))
        if not os.path.exists(candidate):
            break
    self.tmp_file_path = candidate
示例3: processtable
def processtable(raster_obj, zone_file, w, out_folder, out_file):
    """Compute zonal SUM statistics for one raster and export them to a table.

    A temporary DBF table is written inside ``out_folder``, tagged with the
    value ``w * 0.1`` in a new 'elev' text field, copied to the final
    ``out_file`` table, and then deleted.
    """
    # Random throw-away table name inside the output folder.
    temp_name = "/" + next(tempfile._get_candidate_names()) + ".dbf"
    temp_table = out_folder + temp_name
    arcpy.sa.ZonalStatisticsAsTable(zone_file, 'VALUE', raster_obj, temp_table, "DATA", "SUM")
    arcpy.AddField_management(temp_table, "elev", 'TEXT')
    arcpy.CalculateField_management(temp_table, "elev", "'" + str(w*0.1) + "'", "PYTHON")
    arcpy.TableToTable_conversion(temp_table, out_folder, out_file)
    arcpy.Delete_management(temp_table)
示例4: deep_matches_ims
def deep_matches_ims(exec_name, im1_fp, im2_fp, binary_out=False, has_num_matches=True, extra_params=""):
    """Run a matching binary on a pair of images and return the matches.

    When ``binary_out`` is true the matcher is asked for binary output
    (``-b``) and the result is parsed with the binary reader; otherwise the
    plain-text reader is used.  The temporary output file is removed on a
    best-effort basis.
    """
    matches = []
    # Random name for the matcher's output file.
    tmp_fname = next(tempfile._get_candidate_names())
    if binary_out:
        extra_params += " -b"
    cmd = '%s "%s" "%s" %s -out %s' % (exec_name, im1_fp, im2_fp, extra_params, tmp_fname)
    try:
        started = datetime.now()
        os.system(cmd)
        elapsed = datetime.now() - started  # timing kept for debugging
        matches = (read_output_matches_binary(tmp_fname)
                   if binary_out
                   else read_output_matches(tmp_fname, has_num_matches))
    finally:
        # Delete the output file if it exists.
        try:
            os.remove(tmp_fname)
        except OSError:
            pass
    return matches
示例5: auto_resume
def auto_resume(cmd, logdir=os.curdir, name=None, logger=None, debug=True):
    """Run ``cmd`` in an endless restart loop, logging everything it emits.

    stdout lines are logged at DEBUG, anything else at WARNING; each time
    the command exits an ERROR entry is written and the command is started
    again.  A random log-file name is generated when none is given.
    """
    if name is None:
        name = next(tempfile._get_candidate_names()) + '.log'
    logpath = os.path.join(logdir, name)
    if logger is None:
        logger = _init_default_logger(logpath, debug)
    if isinstance(cmd, list):
        cmd = ' '.join(cmd)
    while True:
        for idx, line in enumerate(CommandRunner.run(cmd)):
            if idx == 0:
                # First line seen: the command has actually started.
                logger.info('start `%s`'%cmd)
            is_stdout = line.extra_attrs['tag'] is CommandRunner.Status.STDOUT
            (logger.debug if is_stdout else logger.warning)(line)
        logger.error('found %s exit...'%cmd)
示例6: abstracts
def abstracts(queryString):
    """Fetch PubMed abstracts matching ``queryString`` and return them as a list.

    NCBI's esearch/efetch/xtract pipeline writes one abstract per line to a
    temporary file, an external normalisation script rewrites that file in
    place, and the file is finally read back (stripped of non-ASCII
    characters) and deleted.
    """
    # First call pubmed to pull out abstracts - one line per abstract.
    # NOTE(review): queryString is interpolated into a shell command
    # unescaped - shell-injection risk if it comes from untrusted input.
    temp_name = "./"+next(tempfile._get_candidate_names())
    efetchCommand = "esearch -db pubmed -query \""+queryString+"\" | efetch -mode xml -format abstract | xtract -pattern PubmedArticle -block Abstract -element AbstractText>"+temp_name
    os.system(efetchCommand)
    # Now call Normalizr (Python 3) to normalise the text.
    # Normalised text will overwrite the original text.
    normalizeCommand = "/Users/upac004/Python/GOFind-master/normText.py "+ temp_name
    os.system(normalizeCommand)
    # Read each (now normalised) abstract back into a list.
    theAbstracts = []
    with open(temp_name) as fd:
        for line in fd:
            # Drop any special non-ASCII characters.
            theAbstracts.append(''.join(ch for ch in line if ord(ch) < 128))
    os.remove(temp_name)
    return theAbstracts
示例7: report_worker
def report_worker(sid):
    """Background worker: render a PDF report for session ``sid``.

    Loads the job record, expands the uploaded data files to full paths
    under the app's upload folder, renders the report to a randomly named
    PDF, and records the result in the job store.  On any failure the job
    is marked 'error' and the exception is re-raised to the caller.
    """
    try:
        job = get_job(sid)
        log.info("============= STARTING WORKER ==============")
        log.debug(job)
        from ast import literal_eval
        job['series'] = literal_eval(job['series'])  # stored as its repr string
        # Expand paths to full location on filesystem
        output_filename = os.path.join(
            app.config['UPLOAD_FOLDER'],
            next(tempfile._get_candidate_names()) + '.pdf')
        # Make list of input datafiles
        input_datafiles = [
            os.path.join(app.config['UPLOAD_FOLDER'], f['temporary_name'])
            for f in get_files(sid)
        ]
        report.report(input_datafiles, output_filename,
                      **{**job, 'pdf': True, 'htm': False})
        log.info("============= WORKER FINISHED ==============")
        # Update finished job
        upd_job(sid, 'generated_pdf', output_filename)
        upd_job(sid, 'status', 'done')
    except Exception:
        # log.exception records the full traceback; the original
        # log.error(sys.exc_info()[0]) logged only the exception class.
        log.exception("Exception occurred in worker thread")
        upd_job(sid, 'status', 'error')
        upd_job(sid, 'generated_pdf', None)
        # Bare raise preserves the original traceback for the caller
        # (``raise e`` would restart the chain at this frame).
        raise
示例8: get_temp_aln
def get_temp_aln(aln):
    """Round-trip an alignment through a temporary PIR file and return its sequences.

    The alignment is written out in PIR format to a random path in the
    default temp directory, parsed back with ``get_seqs_from_pir``, and the
    temporary file is removed.
    """
    tmp_path = os.path.join(tempfile._get_default_tempdir(),
                            next(tempfile._get_candidate_names()))
    aln.write(tmp_path, alignment_format='PIR')
    parsed = get_seqs_from_pir(tmp_path)
    os.unlink(tmp_path)
    return parsed
示例9: random_names
def random_names(prefix, suffix=""):
    """Yield up to TMP_MAX random names of the form prefix + token + suffix.

    Use the same technique that tempfile uses internally for candidate
    file names.
    """
    names = tempfile._get_candidate_names()
    for _ in range(TMP_MAX):
        # next(names) works on Python 2 and 3; the original names.next()
        # call was the Python-2-only generator method and is inconsistent
        # with the next(...) usage elsewhere in this file.
        yield prefix + next(names) + suffix
示例10: insert_qr
def insert_qr(pdf, x, y, code=1234567):
    """Draw a QR code for ``code`` in the bottom-right corner of a label.

    The QR image is rendered to a temporary PNG, placed on the PDF at the
    label anchored at (x, y), and the file is removed afterwards.  Codes
    longer than 8 characters are silently ignored.
    """
    # str() so that the default integer code can be length-checked too;
    # the original len(code) raised TypeError for non-string codes.
    if len(str(code)) > 8:
        return
    qr = qrcode.QRCode(
        version=2,
        error_correction=qrcode.constants.ERROR_CORRECT_M,
        box_size=10,
        border=2,
    )
    qr.add_data(HERBURL % code)
    qr.make(fit=True)
    img = qr.make_image()
    temp_name = os.path.join(BASE_URL, './tmp',
                             next(tempfile._get_candidate_names()))
    temp_name += '.png'
    try:
        # PNG data is binary: the file must be opened in 'wb' mode
        # (text mode fails on Python 3 and corrupts the image on Windows).
        with open(temp_name, 'wb') as tmpfile:
            img.save(tmpfile)
            tmpfile.flush()
        pdf.set_xy(x + LABEL_WIDTH - QR_SIZE - 2, y + LABEL_HEIGHT - QR_SIZE - 4)
        pdf.image(temp_name, w=QR_SIZE, h=QR_SIZE)
    finally:
        # Best-effort cleanup; OSError also covers the old IOError alias.
        try:
            os.remove(temp_name)
        except OSError:
            pass
示例11: run
def run(self, musicbrainzid, fpath):
    """Convert a MusicXML score at ``fpath`` to SVG pages via lilypond.

    Returns ``{'score': [svg, ...]}`` with one SVG string per page, ordered
    by file modification time.  Lilypond's textedit:// cross references are
    rewritten into id/from/to attributes usable by a front end.
    """
    temp_name = next(tempfile._get_candidate_names())
    tmpfile = "/tmp/%s.ly" % temp_name
    server_name = socket.gethostname()
    call(["/mnt/compmusic/%s/lilypond/usr/bin/musicxml2ly" % server_name, "--no-page-layout", fpath, "-o", tmpfile])
    tmp_dir = tempfile.mkdtemp()
    call(["lilypond", '-dpaper-size=\"junior-legal\"', "-dbackend=svg", "-o" "%s" % (tmp_dir), tmpfile])
    ret = {'score': []}
    os.unlink(tmpfile)
    regex = re.compile(r'.*<a style="(.*)" xlink:href="textedit:\/\/\/.*:([0-9]+):([0-9]+):([0-9]+)">.*')
    # filter() returns a lazy iterator on Python 3, which has no .sort();
    # materialize the file list before sorting (the original Python-2-style
    # code broke here under Python 3).
    files = [os.path.join(tmp_dir, f) for f in os.listdir(tmp_dir)]
    files = [f for f in files if os.path.isfile(f)]
    files.sort(key=os.path.getmtime)
    for f in files:
        if f.endswith('.svg'):
            with open(f) as svg_file:
                score = svg_file.read()
            ret['score'].append(regex.sub(r'<a style="\1" id="l\2-f\3-t\4" from="\3" to="\4">', score))
            os.remove(f)
    os.rmdir(tmp_dir)
    return ret
示例12: generateTemporaryName
def generateTemporaryName(self):
    """Return a random '<token>.txt' name not already present in the working directory."""
    while True:
        candidate = next(tempfile._get_candidate_names()) + ".txt"
        if not os.path.isfile(candidate):
            return candidate
示例13: gen_args
def gen_args(args, infile_path, outfile):
    """
    Return the argument list generated from 'args' and the infile path
    requested.
    Arguments :
        args  ( string )
            Keyword or arguments to use in the call of Consense, excluding
            infile and outfile arguments.
        infile_path  ( string )
            Input alignment file path.
        outfile  ( string )
            Consensus tree output file.
    Returns :
        list
            List of arguments (excluding binary file) to call Consense.
    """
    if outfile:
        outfile_path = get_abspath(outfile)
    else:
        # No explicit output requested: park the consensus tree in a
        # temporary file so it can still be retrieved afterwards.
        tmp_name = tempfile.gettempprefix() + next(tempfile._get_candidate_names())
        outfile_path = os.path.join(tempfile.gettempdir(), tmp_name)
    # Create full command line list
    return [infile_path, outfile_path]
示例14: mode_pre
def mode_pre(session_dir, args):
    """
    Read from Session file and write to session.pre file
    """
    global gtmpfilename
    # End time is "now" minus the changelog rollover period, so that a
    # still-open changelog window is not consumed prematurely.
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)
    # If Pre status file exists and running pre command again
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")
    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        # Empty or corrupt status file: fall back to start=0 (full range).
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s"
             % (status_file, e), logger=logger)
    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s"
                 % (args.session, args.volume, start, endtime_to_update))
    # Timestamped random name shared by all per-node temp files; stored in
    # the module-global so the cleanup phase below (and callers) can see it.
    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())
    run_cmd_nodes("pre", args, start=start, end=-1, tmpfilename=gtmpfilename)
    # Merger
    if args.full:
        # Full scan: a plain sorted de-duplicated merge of node outputs.
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes", logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)
        # Remove the intermediate merge database; it may legitimately be
        # missing, hence the swallowed error.
        try:
            os.remove(args.outfile + ".db")
        except (IOError, OSError):
            pass
    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)
    # NOTE(review): buffering=0 is only valid for binary-mode files on
    # Python 3 - this line assumes Python 2; confirm target interpreter.
    with open(status_file_pre, "w", buffering=0) as f:
        f.write(str(endtime_to_update))
    sys.stdout.write("Generated output file %s\n" % args.outfile)
示例15: do_upload_cap
def do_upload_cap(s):
    """HTTP handler: receive a gzipped capture upload and keep it if usable.

    The request body is written to a random /tmp/*.cap.gz file and
    decompressed; the capture is then cleaned with wpaclean.  If wpaclean's
    output indicates a network with a WPA/WPA2 PSK handshake, both files
    are renamed into place as dcrack.cap(.gz); otherwise they are deleted.
    """
    cl = int(s.headers['Content-Length'])
    tmp_cap = "/tmp/" + next(tempfile._get_candidate_names()) + ".cap"
    with open(tmp_cap + ".gz", "wb") as fid:
        fid.write(s.rfile.read(cl))
    decompress(tmp_cap)
    # Check file is valid
    output = subprocess.check_output(['wpaclean', tmp_cap + ".tmp", tmp_cap])
    try:
        os.remove(tmp_cap + ".tmp")
    except OSError:
        # Narrowed from a bare except: only file-system errors (e.g. the
        # .tmp file was never created) are expected here.
        pass
    output_split = output.splitlines()
    if len(output_split) > 2:
        # We got more than 2 lines, which means there is a network
        # in there with a WPA/2 PSK handshake
        os.rename(tmp_cap + ".gz", "dcrack.cap.gz")
        os.rename(tmp_cap, "dcrack.cap")
    else:
        # If nothing in the file, just delete it
        os.remove(tmp_cap)
        os.remove(tmp_cap + ".gz")