本文整理汇总了Python中clldutils.path.Path.as_posix方法的典型用法代码示例。如果您正苦于以下问题:Python Path.as_posix方法的具体用法?Python Path.as_posix怎么用?Python Path.as_posix使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类clldutils.path.Path
的用法示例。
在下文中一共展示了Path.as_posix方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _download_sql_dump
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def _download_sql_dump(rel, log):
    """Download, verify and unpack the gzipped SQL dump of a Glottolog release.

    :param rel: dict describing the release; keys used: 'version', \
    'sql_dump_url', 'sql_dump_md5'.
    :param log: logger used for progress messages.
    """
    dump_url = rel['sql_dump_url']
    gz_path = Path('glottolog-{0}.sql.gz'.format(rel['version']))
    log.info('retrieving {0}'.format(dump_url))
    urlretrieve(dump_url, gz_path.as_posix())
    # Guard against truncated or corrupted downloads.
    assert md5(gz_path) == rel['sql_dump_md5']
    # Dropping the '.gz' suffix yields the name of the unpacked dump.
    sql_path = gz_path.with_suffix('')
    with gzip.open(gz_path.as_posix()) as gzfile, sql_path.open('wb') as out:
        shutil.copyfileobj(gzfile, out)
    gz_path.unlink()
    log.info('SQL dump for Glottolog release {0} written to {1}'.format(
        rel['version'], sql_path))
示例2: create
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def create(self, dir_, content):
    """Write ``content`` to a file using ``dir_`` as file-system directory.

    :return: File-system path of the file that was created.
    """
    target = Path(dir_).joinpath(self.relpath)
    parent = target.parent
    if not parent.exists():
        parent.mkdir(parents=True)
    # Text is encoded up front so the file can always be opened in binary mode.
    data = content.encode('utf8') if isinstance(content, text_type) else content
    with open(target.as_posix(), 'wb') as fp:
        fp.write(data)
    return target.as_posix()
示例3: write
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def write(self, outdir='.', suffix='.csv', cited_sources_only=False, archive=False):
    """Write the dataset: the data table plus metadata and sources side-car files.

    :param outdir: Directory to write into; must already exist (ValueError otherwise).
    :param suffix: Suffix of the data file; a tab-style suffix switches the \
    delimiter to '\\t'.
    :param cited_sources_only: If True, restrict the sources file to cited sources.
    :param archive: Truthy to write into a zip Archive instead of plain files; \
    an Archive instance is reused (and left open), otherwise a new archive named \
    after the dataset is created and closed at the end.
    """
    outdir = Path(outdir)
    if not outdir.exists():
        raise ValueError(outdir.as_posix())
    # Only close the archive if we created it ourselves.
    close = False
    if archive:
        if isinstance(archive, Archive):
            container = archive
        else:
            container = Archive(outdir.joinpath(self.name + '.zip'), mode='w')
            close = True
    else:
        container = outdir
    fname = Path(outdir).joinpath(self.name + suffix)
    if fname.suffix in TAB_SUFFIXES:
        self.table.dialect.delimiter = '\t'
    # With an Archive container the writer buffers in memory (target None) and
    # the serialized text is added to the archive afterwards.
    with UnicodeWriter(
            None if isinstance(container, Archive) else fname,
            delimiter=self.table.dialect.delimiter) as writer:
        writer.writerow(self.fields)
        for row in self.rows:
            writer.writerow(row.to_list())
        if isinstance(container, Archive):
            container.write_text(writer.read(), fname.name)
    self.table.url = fname.name
    self.metadata.write(Dataset.filename(fname, 'metadata'), container)
    ids = self._cited_sources if cited_sources_only else None
    self.sources.write(Dataset.filename(fname, 'sources'), container, ids=ids)
    if close:
        container.close()
示例4: lff2tree
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def lff2tree(tree=TREE, outdir=None, builddir=None, lffs=None):
    """
    - get mapping glottocode -> Languoid from old tree
    - assemble new directory tree
      - for each path component in lff/dff:
        - create new dir
        - copy info file from old tree (possibly updating the name) or
        - create info file
      - for each language/dialect in lff/dff:
        - create new dir
        - copy info file from old tree (possibly updating the name) or
        - create info file
    - rm old tree
    - copy new tree
    """
    # FIXME: instead of removing trees, we should just move the current one
    # from outdir to build, and then recreate in outdir.
    builddir = Path(builddir) if builddir else build_path("tree")
    old_tree = {l.id: l for l in walk_tree(tree)} if tree else {}
    out = Path(outdir or tree)
    if not out.parent.exists():
        out.parent.mkdir()
    if out.exists():
        # Clear a stale build dir so the old tree can be parked there.
        if builddir.exists():
            try:
                rmtree(builddir)
            except:  # pragma: no cover
                pass
            if builddir.exists():  # pragma: no cover
                raise ValueError("please remove %s before proceeding" % builddir)
        # move the old tree out of the way
        shutil.move(out.as_posix(), builddir.as_posix())
    out.mkdir()
    lffs = lffs or {}
    languages = {}
    # Languages first, so dialects can be attached to their parent language.
    for lang in read_lff(Level.language, fp=lffs.get(Level.language)):
        languages[lang.id] = lang
        lang2tree(lang, lang.lineage, out, old_tree)
    for lang in read_lff(Level.dialect, fp=lffs.get(Level.dialect)):
        if not lang.lineage or lang.lineage[0][1] not in languages:
            raise ValueError("unattached dialect")  # pragma: no cover
        # A dialect's full lineage is its language's lineage plus its own.
        lang2tree(lang, languages[lang.lineage[0][1]].lineage + lang.lineage, out, old_tree)
示例5: TemporaryPath
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
class TemporaryPath(object):
    """Context manager yielding a unique temporary file path as a string.

    The path is removed on exit if the user created a file there.
    """

    def __init__(self, suffix=''):
        # Create-and-close immediately: we only want the unique name,
        # not an open handle (the file itself vanishes on close).
        tmp = NamedTemporaryFile(suffix=suffix)
        self.name = Path(tmp.name)
        tmp.close()

    def __enter__(self):
        return self.name.as_posix()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.name.exists():
            remove(self.name)
示例6: dependencies_graph
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def dependencies_graph(imps):
    """Compute a maximum-weight spanning tree over the feature dependency graph.

    :param imps: iterable of (weight, feature1, feature2) triples.
    :return: tuple (H, V) where H is the heaviest of the spanning trees rooted \
    at each vertex and V is the set of features; also writes the graph to \
    static/dependencies.gv in Graphviz dot format.
    """
    deps = {(f1, f2): v for (v, f1, f2) in imps if v > 0.0}
    # dict.iterkeys() was Python 2 only; iterating the dict directly is
    # equivalent and works on both 2 and 3.
    V = set(f for fs in deps for f in fs)
    G = {k: v for k, v in deps.items() if v > 0.0}
    MSTs = [mst(G, x) for x in V]
    # Use key= so ties in total weight never compare the dict payloads
    # themselves (a TypeError on Python 3).
    H = max(MSTs, key=lambda m: sum(m.values()))
    path = Path(grambank.__file__).parent.joinpath('static', 'dependencies.gv')
    with open(path.as_posix(), 'w') as fp:
        fp.write(dot(H, V))
    return (H, V)
示例7: test_extractor
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def test_extractor(self):
    # End-to-end check of beastling.extractor: an XML produced by BeastXml
    # can be round-tripped, embedded config files are restored, and embedded
    # data files are reported by _extract.
    config = self.make_cfg(
        [config_path(f).as_posix() for f in ("admin", "mk", "embed_data")])
    xml = beastling.beastxml.BeastXml(config)
    xmlfile = self.tmp.joinpath("beastling.xml")
    xml.write_file(xmlfile.as_posix())
    self.assertTrue(bool(self._extract(xmlfile)))
    # Second scenario: config given as a dict; extraction should recreate
    # the config file '<basename>.conf' in the current directory.
    config = self.make_cfg({
        'admin': {'basename': 'abcdefg'},
        'model': {
            'model': 'mk',
            'data': data_path('basic.csv').as_posix()}})
    xml = beastling.beastxml.BeastXml(config)
    xmlfile = self.tmp.joinpath("beastling.xml")
    xml.write_file(xmlfile.as_posix())
    beastling.extractor.extract(xmlfile)
    p = Path('abcdefg.conf')
    self.assertTrue(p.exists())
    cfg = INI(interpolation=None)
    cfg.read(p.as_posix())
    remove(p)
    self.assertEqual(cfg['admin']['basename'], 'abcdefg')
    self.assertEqual(cfg['model']['model'], 'mk')
    # Third scenario: a hand-written XML with extractor marker comments;
    # the referenced data file name must show up in the extraction result.
    fname = self.tmp.joinpath('test.xml')
    datafile = self.tmp.joinpath(('test.csv'))
    self.assertFalse(datafile.exists())
    with fname.open('w', encoding='utf8') as fp:
        fp.write("""<?xml version="1.0" encoding="UTF-8"?>
<r>
<!--%s
%s
[admin]
[model]
-->
<!--%s:%s-->
</r>
""" % (beastling.extractor._generated_str,
       beastling.extractor._config_file_str,
       beastling.extractor._data_file_str,
       datafile.as_posix()))
    res = self._extract(fname)
    self.assertIn(datafile.name, ''.join(res))
示例8: create
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def create(self, req, filename=None, verbose=True):
    """Create the download file for this dataset.

    Writes to a '<target>.tmp' sibling first and moves it into place at the
    end, so a partially written download never replaces a good one.

    :param req: current request, used to query the items to dump.
    :param filename: optional pre-built file to pack instead of dumping \
    query results (zip branch only).
    :param verbose: passed through to page_query progress reporting.
    """
    p = self.abspath(req)
    if not p.parent.exists():  # pragma: no cover
        p.parent.mkdir()
    tmp = Path('%s.tmp' % p.as_posix())
    if self.rdf:
        # we do not create archives with a readme for rdf downloads, because each
        # RDF entity points to the dataset and the void description of the dataset
        # covers all relevant metadata.
        #
        # TODO: write test for the file name things!?
        #
        # Path(tmp.stem).stem strips both '.tmp' and the compression suffix to
        # get the member name stored in the gzip header.
        with closing(GzipFile(
                filename=Path(tmp.stem).stem, fileobj=tmp.open('wb')
        )) as fp:
            self.before(req, fp)
            for i, item in enumerate(page_query(self.query(req), verbose=verbose)):
                self.dump(req, fp, item, i)
            self.after(req, fp)
    else:
        with ZipFile(tmp.as_posix(), 'w', ZIP_DEFLATED) as zipfile:
            if not filename:
                # Dump the query results into an in-memory stream, then store
                # the whole stream as one archive member.
                fp = self.get_stream()
                self.before(req, fp)
                for i, item in enumerate(
                        page_query(self.query(req), verbose=verbose)):
                    self.dump(req, fp, item, i)
                self.after(req, fp)
                zipfile.writestr(self.name, self.read_stream(fp))
            else:  # pragma: no cover
                zipfile.write(filename, self.name)
            zipfile.writestr(
                'README.txt',
                README.format(
                    req.dataset.name,
                    '=' * (
                        len(req.dataset.name)
                        + len(' data download')),
                    req.dataset.license,
                    TxtCitation(None).render(req.dataset, req)).encode('utf8'))
    if p.exists():  # pragma: no cover
        remove(p)
    move(tmp, p)
示例9: _str_path
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def _str_path(path, mkdir=False):
    """Get a file-system path as text_type, suitable for passing into io.open.

    Parameters
    ----------
    path : {text_type, Path}
        A fs path either as Path instance or as text_type.
    mkdir : bool (default=False)
        If True, create the directories within the path.

    Returns
    -------
    path : text_type
        The path as text_type.
    """
    fspath = Path(path_component(path))
    parent = fspath.parent
    if mkdir and parent and not parent.exists():
        parent.mkdir(parents=True)
    return fspath.as_posix()
示例10: contribution_detail_html
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def contribution_detail_html(context=None, request=None, **kw):
    """Render a WALS chapter: load its static XHTML body and splice in the
    rendered value tables for each of the chapter's features.

    :param context: the Contribution being displayed.
    :param request: current pyramid request.
    :return: dict with the finished HTML under 'text'.
    """
    # Chapter s4 has no body of its own; it redirects to the genealogy page.
    if context.id == 's4':
        raise HTTPFound(request.route_url('genealogy'))
    p = Path(wals3.__file__).parent.joinpath(
        'static', 'descriptions', str(context.id), 'body.xhtml')
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with codecs.open(p.as_posix(), encoding='utf8') as fp:
        c = fp.read()
    adapter = get_adapter(IRepresentation, Feature(), request, ext='snippet.html')
    for feature in DBSession.query(Feature)\
            .filter(Feature.contribution_pk == context.pk)\
            .options(joinedload_all(Feature.domain, DomainElement.values)):
        # Replace the '__values_<id>__' placeholder with the rendered
        # thead/tbody of the feature's value table.
        table = soup(adapter.render(feature, request))
        values = '\n'.join('%s' % table.find(tag).extract()
                           for tag in ['thead', 'tbody'])
        c = c.replace('__values_%s__' % feature.id, values)
    return {'text': c.replace('http://wals.info', request.application_url)}
示例11: rewrite
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def rewrite(fname, visitor, **kw):
    """Utility function to rewrite rows in tsv files.

    :param fname: Path of the dsv file to operate on.
    :param visitor: A callable that takes a line-number and a row as input and returns a \
    (modified) row or None to filter out the row.
    :param kw: Keyword parameters are passed through to csv.reader/csv.writer.
    """
    if not isinstance(fname, Path):
        assert isinstance(fname, string_types)
        fname = Path(fname)
    assert fname.is_file()
    # Write to a hidden sibling first, then swap it into place, so the
    # original file is never left half-rewritten.
    scratch = fname.parent.joinpath('.tmp.' + fname.name)
    with UnicodeReader(fname, **kw) as source, UnicodeWriter(scratch, **kw) as sink:
        for lineno, row in enumerate(source):
            result = visitor(lineno, row)
            if result is not None:
                sink.writerow(result)
    shutil.move(scratch.as_posix(), fname.as_posix())
示例12: from_ini
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def from_ini(cls, ini, nodes=None):
    """Create a languoid from an INI file, deriving its lineage from the
    directory hierarchy the file lives in.

    :param ini: path to the INI file (Path or string).
    :param nodes: optional cache mapping id -> (name, id, level); it is \
    updated in place with every node encountered.
    :return: instance of ``cls``.
    """
    # ``nodes={}`` as a default would be shared across calls (the classic
    # mutable-default bug); create a fresh dict per call instead.
    if nodes is None:
        nodes = {}
    if not isinstance(ini, Path):
        ini = Path(ini)
    directory = ini.parent
    cfg = INI()
    cfg.read(ini.as_posix(), encoding='utf8')
    lineage = []
    for parent in directory.parents:
        id_ = parent.name.split('.')[-1]
        assert id_ != directory.name.split('.')[-1]
        if not cls.id_pattern.match(id_):
            # we ignore leading non-languoid-dir path components.
            break
        if id_ not in nodes:
            l = Languoid.from_dir(parent, nodes=nodes)
            nodes[id_] = (l.name, l.id, l.level)
        lineage.append(nodes[id_])
    res = cls(cfg, list(reversed(lineage)))
    nodes[res.id] = (res.name, res.id, res.level)
    return res
示例13: from_ini
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def from_ini(cls, ini, nodes=None):
    """Read a languoid from an INI file, reconstructing its lineage from the
    directory hierarchy the file lives in.

    :param ini: path of the INI file.
    :param nodes: optional cache mapping glottocode -> (name, id, level); \
    updated in place.
    :return: instance of ``cls``.
    """
    nodes = {} if nodes is None else nodes
    ini = Path(ini)
    ini_dir = ini.parent
    cfg = INI(interpolation=None)
    cfg.read(ini.as_posix(), encoding='utf8')
    lineage = []
    for ancestor in ini_dir.parents:
        gcode = ancestor.name
        assert gcode != ini_dir.name
        if not Glottocode.pattern.match(gcode):
            # we ignore leading non-languoid-dir path components.
            break
        if gcode not in nodes:
            languoid = Languoid.from_dir(ancestor, nodes=nodes)
            nodes[gcode] = (languoid.name, languoid.id, languoid.level)
        lineage.append(nodes[gcode])
    res = cls(cfg, list(reversed(lineage)), directory=ini_dir)
    nodes[res.id] = (res.name, res.id, res.level)
    return res
示例14: create
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import as_posix [as 别名]
def create(self, req, filename=None, verbose=True):
    """Create the zipped CSVW download: one CSV per parameter, a shared
    csv-metadata.json table-group description, a BibTeX file and a README.

    Writes to a '<target>.tmp' sibling first and moves it into place at the
    end so a partial write never replaces a good download.
    """
    p = self.abspath(req)
    if not p.parent.exists():  # pragma: no cover
        p.parent.mkdir()
    tmp = Path('%s.tmp' % p)
    language_url_pattern = self.route_url_pattern(req, 'language')
    with ZipFile(tmp.as_posix(), 'w', ZIP_DEFLATED) as zipfile:
        tables = []
        # One CSV per parameter, each described by a Table entry in the
        # csv-metadata.json document.
        for param in DBSession.query(Parameter).options(joinedload(Parameter.domain)):
            fname = '%s-%s.csv' % (req.dataset.id, param.id)
            zipfile.writestr(fname, self.get_values(param, language_url_pattern))
            tables.append({
                '@type': 'Table',
                'url': fname,
                'notes': [
                    {
                        '@id': req.resource_url(param),
                        'dc:identifier': param.id,
                        'dc:title': param.name,
                        'dc:description': param.description or ''}] + [
                    {
                        '@type': 'DomainElement',
                        'name': de.name,
                        'description': de.description,
                        'numeric': de.number
                    } for de in param.domain
                ],
            })
        md = CsvmJsonAdapter.csvm_basic_doc(req, tables=tables)
        # All per-parameter tables share one schema, so it lives on the
        # TableGroup rather than on each table.
        md.update({
            '@type': 'TableGroup',
            'dc:language': list(self.get_languages(req, language_url_pattern)),
            'tableSchema': {
                "columns": [
                    {
                        "name": "ID",
                        "datatype": "string",
                        "required": True
                    },
                    {
                        "name": "Language_ID",
                        "datatype": "string",
                        "required": True
                    },
                    {
                        "name": "Parameter_ID",
                        "datatype": "string",
                        "required": True
                    },
                    {
                        "name": "Contribution_ID",
                        "datatype": "string",
                        "required": True
                    },
                    {
                        "name": "Value",
                        "datatype": "string",
                        "required": True
                    },
                    {
                        "name": "Source",
                        "datatype": "string",
                    },
                    {
                        "name": "Comment",
                        "datatype": "string",
                    },
                ],
                "primaryKey": "ID",
                'aboutUrl': self.route_url_pattern(req, 'value', '{ID}'),
            },
        })
        zipfile.writestr(
            '%s.csv-metadata.json' % req.dataset.id, json.dumps(md, indent=4))
        bib = Database([
            rec.bibtex() for rec in DBSession.query(Source).order_by(Source.name)])
        zipfile.writestr('%s.bib' % req.dataset.id, ('%s' % bib).encode('utf8'))
        zipfile.writestr(
            'README.txt',
            README.format(
                req.dataset.name,
                '=' * (
                    len(req.dataset.name)
                    + len(' data download')),
                req.dataset.license,
                TxtCitation(None).render(req.dataset, req)).encode('utf8'))
    if p.exists():  # pragma: no cover
        remove(p)
    move(tmp, p)