本文整理汇总了Python中tempfile.NamedTemporaryFile.write方法的典型用法代码示例。如果您正苦于以下问题:Python NamedTemporaryFile.write方法的具体用法?Python NamedTemporaryFile.write怎么用?Python NamedTemporaryFile.write使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tempfile.NamedTemporaryFile
的用法示例。
在下文中一共展示了NamedTemporaryFile.write方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_main
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def test_main(self):
    """Integration test for the conversion binary.

    Writes a minimal MARCXML record and a one-entry journal-name
    knowledge base to temporary files, invokes ``self.bin_path`` with
    ``--kb``, and asserts that the journal title in subfield ``s`` was
    rewritten by the KB.
    """
    xml = """<record>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="s">Test Journal Name,100,10</subfield>
</datafield>
</record>"""
    # mode='w' so writing str also works on Python 3, where the
    # NamedTemporaryFile default mode is binary.
    xml_temp_file = NamedTemporaryFile(mode='w', dir=CFG_TMPDIR)
    xml_temp_file.write(xml)
    xml_temp_file.flush()
    kb = "TEST JOURNAL NAME---Converted"
    kb_temp_file = NamedTemporaryFile(mode='w', dir=CFG_TMPDIR)
    kb_temp_file.write(kb)
    kb_temp_file.flush()
    dest_temp_fd, dest_temp_path = mkstemp(dir=CFG_TMPDIR)
    try:
        # Only the path is handed to the subprocess; the fd is unused.
        os.close(dest_temp_fd)
        process = subprocess.Popen([self.bin_path, xml_temp_file.name,
                                    '--kb', kb_temp_file.name,
                                    '-o', dest_temp_path],
                                   stderr=subprocess.PIPE,
                                   stdout=subprocess.PIPE)
        process.wait()
        # 'with' closes the result file handle (the original leaked it).
        with open(dest_temp_path) as dest_file:
            transformed_xml = dest_file.read()
        self.assertXmlEqual(transformed_xml, """<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record><datafield ind1="C" ind2="5" tag="999"><subfield code="s">Converted,100,10</subfield></datafield></record>
</collection>""")
    finally:
        os.unlink(dest_temp_path)
示例2: compile_inline
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def compile_inline(self, data, ext):
    """
    Compile inline css. Have to compile to a file, because some css compilers
    may not output to stdout, but we know they all output to a file. It's a
    little hackish, but you shouldn't be compiling in production anyway,
    right?

    :param data: inline source text to compile
    :param ext: source extension, used to select the compiler from
        COMPILER_FORMATS and as the temp-file suffix
    :return: the compiled CSS text
    :raises Exception: if the compiler entry has no 'binary_path'
    """
    compiler = settings.COMPILER_FORMATS[ext]
    try:
        bin = compiler['binary_path']
    except KeyError:
        # Catch only the missing key; the original bare 'except' would
        # also have masked unrelated errors.
        raise Exception("Path to CSS compiler must be included in COMPILER_FORMATS")
    tmp_file = NamedTemporaryFile(mode='w', suffix=ext)
    tmp_file.write(dedent(data))
    tmp_file.flush()
    path, ext = os.path.splitext(tmp_file.name)
    tmp_css = ''.join((path, '.css'))
    self.compile(path, compiler)
    # Close the compiled-output handle instead of leaking it.
    with open(tmp_css, 'r') as compiled:
        data = compiled.read()
    # cleanup
    tmp_file.close()
    os.remove(tmp_css)
    return data
示例3: run
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def run(self):
    """Expand the current selection(s) through m4 and display the result
    in an output panel (Sublime Text plugin command)."""
    active_view = self.window.active_view()
    # Multiple selections are joined with blank lines before expansion.
    text = "\n\n".join(getSelectedText(active_view)).strip()
    # delete=False: the file must survive close() so m4 can open it by name.
    tf = NamedTemporaryFile(mode="w", delete=False)
    try:
        tf.write(text)
        tf.close()  # flush to disk before handing the path to m4
        # Run m4 from the directory of the current file so relative
        # include()s resolve.
        res = subprocess.check_output(["m4", tf.name],
                                      stderr=subprocess.STDOUT,
                                      cwd=os.path.dirname(os.path.abspath(active_view.file_name())))
        res = res.decode('utf-8').replace('\r', '').strip()
        panel_name = "m4expand.results"
        panel = self.window.create_output_panel(panel_name)
        self.window.run_command("show_panel", {"panel": "output." + panel_name})
        panel.set_read_only(False)
        # Reuse the source view's syntax highlighting in the panel.
        panel.set_syntax_file(active_view.settings().get("syntax"))
        panel.run_command("append", {"characters": res})
        panel.set_read_only(True)
    except Exception as e:
        print("M4Expand - An error occurred: ", e)
    finally:
        # Manual cleanup is required because of delete=False.
        os.unlink(tf.name)
示例4: odt_subreport
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def odt_subreport(name=None, obj=None):
    """Render the Aeroo report registered under *name* for record *obj*
    to a temporary .odt file and return an ``<insert_doc(...)>`` directive
    referencing it; return None if no such report is registered.

    Closes over cr/uid/ir_obj/context/self/aeroo_print from the enclosing
    OpenERP report-rendering scope.
    """
    if not aeroo_ooo:
        return _("Error! Subreports not available!")
    report_xml_ids = ir_obj.search(cr, uid, [('report_name', '=', name)], context=context)
    if report_xml_ids:
        service = netsvc.Service._services['report.%s' % name]
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
        data = {'model': obj._table_name, 'id': obj.id, 'report_type': 'aeroo', 'in_format': 'oo-odt'}
        ### Get new printing object ###
        sub_aeroo_print = AerooPrint()
        service.active_prints[sub_aeroo_print.id] = sub_aeroo_print
        context['print_id'] = sub_aeroo_print.id
        ###############################
        sub_aeroo_print.start_time = time.time()
        report, output = service.create_aeroo_report(cr, uid, \
            [obj.id], data, report_xml, context=context,
            output='odt')  # change for OpenERP 6.0 - Service class usage
        ### Delete printing object ###
        AerooPrint.print_ids.remove(sub_aeroo_print.id)
        del service.active_prints[sub_aeroo_print.id]
        ##############################
        # delete=False: the file must outlive this handle so the parent
        # report can embed it; it is recorded in subreports for cleanup.
        temp_file = NamedTemporaryFile(suffix='.odt', prefix='aeroo-report-', delete=False)
        try:
            temp_file.write(report)
        finally:
            temp_file.close()
        # self.oo_subreports[print_id].append(temp_file.name)
        # aeroo_print.subreports.append(temp_file.name)
        self.active_prints[aeroo_print.id].subreports.append(temp_file.name)
        return "<insert_doc('%s')>" % temp_file.name
    return None
示例5: test_safe_md5
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def test_safe_md5(self):
    """Make sure we have the expected md5 with varied input types

    This method is ported from PyCogent (http://www.pycogent.org). PyCogent
    is a GPL project, but we obtained permission from the authors of this
    method to port it to the BIOM Format project (and keep it under BIOM's
    BSD license).
    """
    # md5 of the literal content "foo\n"
    exp = 'd3b07384d113edec49eaa6238ad5ff00'
    tmp_f = NamedTemporaryFile(
        mode='w',
        prefix='test_safe_md5',
        suffix='txt')
    tmp_f.write('foo\n')
    tmp_f.flush()
    # Same digest expected whether the input is an open file
    # ('U' = universal newlines, Python 2) or an iterable of lines.
    obs = safe_md5(open(tmp_f.name, 'U'))
    self.assertEqual(obs, exp)
    obs = safe_md5(['foo\n'])
    self.assertEqual(obs, exp)
    # unsupported type raises TypeError
    self.assertRaises(TypeError, safe_md5, 42)
示例6: _write_local_schema_file
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def _write_local_schema_file(self, cursor):
    """
    Takes a cursor, and writes the BigQuery schema for the results to a
    local file system.

    :return: A dictionary where key is a filename to be used as an object
        name in GCS, and values are file handles to local files that
        contains the BigQuery schema fields in .json format.
    """
    # Postgres array type OIDs map to REPEATED fields in BigQuery.
    repeated_type_ids = (1009, 1005, 1007, 1016)
    # See PEP 249 for details about the description tuple:
    # element 0 is the column name, element 1 its type code.
    schema = [
        {
            'name': column[0],
            'type': self.type_map(column[1]),
            'mode': 'REPEATED' if column[1] in repeated_type_ids else 'NULLABLE',
        }
        for column in cursor.description
    ]
    self.log.info('Using schema for %s: %s', self.schema_filename, schema)
    tmp_schema_file_handle = NamedTemporaryFile(delete=True)
    serialized = json.dumps(schema, sort_keys=True)
    if PY3:
        serialized = serialized.encode('utf-8')
    tmp_schema_file_handle.write(serialized)
    return {self.schema_filename: tmp_schema_file_handle}
示例7: render_to_temporary_file
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def render_to_temporary_file(self, template_name, mode='w+b', bufsize=-1,
                             suffix='.html', prefix='tmp', dir=None,
                             delete=True):
    """Render *template_name* with the current context data into a
    NamedTemporaryFile and return the still-open file object.

    The keyword arguments mirror NamedTemporaryFile's. The nested
    try/except TypeError blocks keep this working on both Python 2 and
    Python 3.
    """
    template = self.resolve_template(template_name)
    context = self.resolve_context(self.context_data)
    content = smart_str(template.render(context))
    content = make_absolute_paths(content)
    try:
        # Python 2 spelling of the buffer-size argument.
        tempfile = NamedTemporaryFile(mode=mode, bufsize=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    except TypeError:
        # Python 3 renamed bufsize -> buffering.
        tempfile = NamedTemporaryFile(mode=mode, buffering=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    try:
        tempfile.write(content)
        tempfile.flush()
        return tempfile
    except TypeError:
        # A binary-mode file rejected the str; encode and retry.
        tempfile.write(bytes(content, 'UTF-8'))
        tempfile.flush()
        return tempfile
    except:
        # Clean-up tempfile if an Exception is raised.
        tempfile.close()
        raise
示例8: get_logs
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def get_logs(self):
    """
    Build the logs entry for the metadata 'output' section

    :return: list, Output instances
    """
    # Collect logs from the server, scoping to a namespace when one is set.
    kwargs = {'namespace': self.namespace} if self.namespace is not None else {}
    logs = self.osbs.get_build_logs(self.build_id, **kwargs)

    def make_log_file(prefix, contents):
        # The temporary file is deleted once closed.
        handle = NamedTemporaryFile(prefix=prefix, suffix=".log", mode='w')
        handle.write(contents)
        handle.flush()
        return handle

    logfile = make_log_file(self.build_id, logs)
    docker_logs = make_log_file("docker-%s" % self.build_id,
                                "\n".join(self.workflow.build_logs))
    return [Output(file=docker_logs,
                   metadata=self.get_output_metadata(docker_logs.name,
                                                     "build.log")),
            Output(file=logfile,
                   metadata=self.get_output_metadata(logfile.name,
                                                     "openshift-final.log"))]
示例9: _write_local_data_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def _write_local_data_files(self, cursor):
    """
    Takes a cursor, and writes results to a local file.

    :return: A dictionary where keys are filenames to be used as object
        names in GCS, and values are file handles to local files that
        contain the data for the GCS objects.
    """
    # Column names from the PEP 249 cursor description tuples.
    schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
    file_no = 0
    tmp_file_handle = NamedTemporaryFile(delete=True)
    tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
    for row in cursor:
        # Convert datetime objects to utc seconds, and decimals to floats
        row = map(self.convert_types, row)
        row_dict = dict(zip(schema, row))
        # TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
        s = json.dumps(row_dict)
        if PY3:
            s = s.encode('utf-8')
        tmp_file_handle.write(s)
        # Append newline to make dumps BigQuery compatible.
        tmp_file_handle.write(b'\n')
        # Stop if the file exceeds the file size limit.
        if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
            # Roll over to a new temp file; previous handles stay open
            # and are all returned to the caller.
            file_no += 1
            tmp_file_handle = NamedTemporaryFile(delete=True)
            tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
    return tmp_file_handles
示例10: testFileAnnotationSpeed
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def testFileAnnotationSpeed(author_testimg_generated, gatewaywrapper):
    """ Tests speed of loading file annotations. See PR: 4176 """
    # Python 2 code (print statements below).
    try:
        f = NamedTemporaryFile()
        f.write("testFileAnnotationSpeed text")
        ns = TESTANN_NS
        image = author_testimg_generated
        # use the same file to create many file annotations
        for i in range(20):
            fileAnn = gatewaywrapper.gateway.createFileAnnfromLocalFile(f.name, mimetype="text/plain", ns=ns)
            image.linkAnnotation(fileAnn)
    finally:
        # Closing the NamedTemporaryFile also removes it from disk.
        f.close()
    now = time.time()
    for ann in image.listAnnotations():
        if ann._obj.__class__ == omero.model.FileAnnotationI:
            # mimmic behaviour of templates which call multiple times
            print ann.getId()
            print ann.getFileName()
            print ann.getFileName()
            print ann.getFileSize()
            print ann.getFileSize()
    print time.time() - now
示例11: SubmitPlay
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def SubmitPlay(request):
    """Django view: build a temporary Ansible inventory from the request
    and run a playbook, rendering a summary of per-host results.

    Python 2 code (uses dict.iteritems()).
    """
    import jinja2
    from tempfile import NamedTemporaryFile
    import os
    html = ''
    # Inventory template: a single [current] group with one address.
    inventory = """
[current]
{{ public_ip_address }}
"""
    # for (name,value) in request.GET:
    #     if name=='Servers':
    #         html=html+str(value+'\n')
    # NOTE(review): 'Severs' looks like a typo for 'Servers' -- confirm
    # against the submitting form's field name before changing it.
    html = request.GET['Severs']
    inventory_template = jinja2.Template(inventory)
    rendered_inventory = inventory_template.render({
        'public_ip_address': html
        # and the rest of our variables
    })
    # Create a temporary file and write the template string to it
    # (delete=False so the file persists after close for ansible to read).
    hosts = NamedTemporaryFile(delete=False)
    hosts.write(rendered_inventory)
    hosts.close()
    print(hosts.name)
    import ansiblepythonapi as myPlay
    args = ['/home/ec2-user/playss/AnsiblePlus/test.yml']
    # args.append('-i')
    # args.append(hosts.name)
    message = myPlay.main(args)
    objects = []
    for runner_results in myPlay.message:
        values = []
        # Hosts that could not be contacted ('dark').
        for (host, value) in runner_results.get('dark', {}).iteritems():
            try:
                values.append(host)
                values.append(value['failed'])
                values.append(value['msg'])
                objects.append(values)
            except:
                pass
        # Hosts that responded.
        for (host, value) in runner_results.get('contacted', {}).iteritems():
            try:
                values.append(host)
                values.append(value['failed'])
                values.append(value['msg'])
                objects.append(values)
            except:
                pass
    # for msg in pb.stats.output():
    context = Context({'Summary': objects})
    return render(request, 'AnsibleResponce.html', context)
示例12: hmmscan
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def hmmscan(fasta, database_path, ncpus=10):
    """Run hmmscan on *fasta* (FASTA-formatted string) against
    *database_path* and return {query_name: [[hit, evalue, score], ...]}.

    Python 2 code. Returns an empty defaultdict if hmmscan exits non-zero.
    """
    F = NamedTemporaryFile()
    F.write(fasta)
    F.flush()
    OUT = NamedTemporaryFile()
    cmd = '%s --cpu %s -o /dev/null -Z 190000 --tblout %s %s %s' %(HMMSCAN, ncpus, OUT.name, database_path, F.name)
    #print cmd
    sts = subprocess.call(cmd, shell=True)
    byquery = defaultdict(list)
    if sts == 0:
        # hmmscan wrote the table to OUT.name; our handle is still at
        # offset 0, so iterating it reads the freshly-written table.
        for line in OUT:
            #['#', '---', 'full', 'sequence', '----', '---', 'best', '1', 'domain', '----', '---', 'domain', 'number', 'estimation', '----']
            #['#', 'target', 'name', 'accession', 'query', 'name', 'accession', 'E-value', 'score', 'bias', 'E-value', 'score', 'bias', 'exp', 'reg', 'clu', 'ov', 'env', 'dom', 'rep', 'inc', 'description', 'of', 'target']
            #['#-------------------', '----------', '--------------------', '----------', '---------', '------', '-----', '---------', '------', '-----', '---', '---', '---', '---', '---', '---', '---', '---', '---------------------']
            #['delNOG20504', '-', '553220', '-', '1.3e-116', '382.9', '6.2', '3.4e-116', '381.6', '6.2', '1.6', '1', '1', '0', '1', '1', '1', '1', '-']
            if line.startswith('#'): continue
            fields = line.split() # output is not tab delimited! Should I trust this split?
            hit, _, query, _ , evalue, score, bias, devalue, dscore, dbias = fields[0:10]
            evalue, score, bias, devalue, dscore, dbias = map(float, [evalue, score, bias, devalue, dscore, dbias])
            byquery[query].append([hit, evalue, score])
    # Closing the temp files deletes them.
    OUT.close()
    F.close()
    return byquery
示例13: run_via_pbs
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def run_via_pbs(args, pbs):
    """Schedule the command line *args* for execution via a PBS backend.

    Only 'condor' is supported: writes a condor submit file, invokes
    condor_submit on it, and removes the submit file afterwards.
    """
    assert(pbs in ('condor',))  # for now
    # TODO: RF to support multiple backends, parameters, etc, for now -- just condor, no options
    # delete=False: condor_submit must be able to open the file by name.
    f = NamedTemporaryFile('w', prefix='datalad-%s-' % pbs, suffix='.submit', delete=False)
    try:
        pwd = getpwd()
        # The condor log sits next to the submit file.
        logs = f.name.replace('.submit', '.log')
        exe = args[0]
        # TODO: we might need better way to join them, escaping spaces etc. There must be a stock helper
        #exe_args = ' '.join(map(repr, args[1:])) if len(args) > 1 else ''
        exe_args = ' '.join(args[1:]) if len(args) > 1 else ''
        # NOTE: the %(name)s placeholders are filled from locals(), so the
        # local variable names above are part of the template contract.
        f.write("""\
Executable = %(exe)s
Initialdir = %(pwd)s
Output = %(logs)s
Error = %(logs)s
getenv = True
arguments = %(exe_args)s
queue
""" % locals())
        f.close()
        Runner().run(['condor_submit', f.name])
        lgr.info("Scheduled execution via %s. Logs will be stored under %s" % (pbs, logs))
    finally:
        # Manual cleanup is required because of delete=False.
        os.unlink(f.name)
示例14: get_splice_score
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def get_splice_score(a, s_type=5):
    """Score splice-site sequences with the MaxEntScan perl scripts.

    :param a: iterable of (name, sequence) pairs
    :param s_type: splice-site type, 3 or 5
    :return: float, sum of the per-sequence MaxEnt scores
    :raises Exception: if s_type is invalid or config.maxentpath is unset
    """
    if s_type not in [3, 5]:
        raise Exception("Invalid splice type {}, should be 3 or 5".format(s_type))
    maxent = config.maxentpath
    if not maxent:
        raise Exception("Please provide path to the score5.pl and score3.pl maxent scripts in config file")
    tmp = NamedTemporaryFile()
    for name, seq in a:
        tmp.write(">{}\n{}\n".format(name, seq))
    tmp.flush()
    # The script is run from the maxent directory so its data files resolve.
    cmd = "perl score{}.pl {}".format(s_type, tmp.name)
    p = sp.Popen(cmd, shell=True, cwd=maxent, stdout=sp.PIPE)
    score = 0
    for line in p.stdout.readlines():
        vals = line.strip().split("\t")
        if len(vals) > 1:
            try:
                score += float(vals[-1])
            except ValueError:
                logger.error("valueError, skipping: {}".format(vals))
            except Exception:
                # Was a bare 'except:'; keep the log-and-continue behavior
                # but stop swallowing SystemExit/KeyboardInterrupt.
                logger.error("Something unexpected happened")
    # Close (and thereby delete) the temp file instead of leaking it.
    tmp.close()
    return score
示例15: _generate_training_files
# 需要导入模块: from tempfile import NamedTemporaryFile [as 别名]
# 或者: from tempfile.NamedTemporaryFile import write [as 别名]
def _generate_training_files(self):
    """Returns a tuple of file objects suitable for passing to the
    RdpTrainer application controller.

    Builds an RdpTrainingSet from the configured reference sequences and
    id-to-taxonomy map, then writes the RDP taxonomy and training
    sequences to temporary files (rewound for reading by the caller).
    """
    tmp_dir = get_qiime_temp_dir()
    training_set = RdpTrainingSet()
    reference_seqs_file = open(self.Params['reference_sequences_fp'], 'U')
    id_to_taxonomy_file = open(self.Params['id_to_taxonomy_fp'], 'U')
    try:
        for seq_id, seq in MinimalFastaParser(reference_seqs_file):
            training_set.add_sequence(seq_id, seq)
        for line in id_to_taxonomy_file:
            seq_id, lineage_str = map(strip, line.split('\t'))
            training_set.add_lineage(seq_id, lineage_str)
    finally:
        # Close both input handles (the original leaked them).
        reference_seqs_file.close()
        id_to_taxonomy_file.close()
    training_set.dereplicate_taxa()
    rdp_taxonomy_file = NamedTemporaryFile(
        prefix='RdpTaxonAssigner_taxonomy_', suffix='.txt', dir=tmp_dir)
    rdp_taxonomy_file.write(training_set.get_rdp_taxonomy())
    # Rewind so the caller reads from the start.
    rdp_taxonomy_file.seek(0)
    rdp_training_seqs_file = NamedTemporaryFile(
        prefix='RdpTaxonAssigner_training_seqs_', suffix='.fasta',
        dir=tmp_dir)
    for rdp_id, seq in training_set.get_training_seqs():
        rdp_training_seqs_file.write('>%s\n%s\n' % (rdp_id, seq))
    rdp_training_seqs_file.seek(0)
    self._training_set = training_set
    return rdp_taxonomy_file, rdp_training_seqs_file