本文整理汇总了Python中clldutils.path.Path.joinpath方法的典型用法代码示例。如果您正苦于以下问题:Python Path.joinpath方法的具体用法?Python Path.joinpath怎么用?Python Path.joinpath使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类clldutils.path.Path
的用法示例。
在下文中一共展示了Path.joinpath方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def write(self, outdir='.', suffix='.csv', cited_sources_only=False, archive=False):
    """Write the dataset (data table, metadata and sources) to a directory or archive.

    :param outdir: Directory (``Path`` or path string) to write into; must exist.
    :param suffix: Suffix for the data table file; a tab suffix switches the \
dialect to tab-delimited.
    :param cited_sources_only: If ``True``, only sources actually cited are written.
    :param archive: If truthy, write into a zip archive. May be an existing \
``Archive`` instance (left open for the caller) or any other truthy value, in \
which case a new ``<name>.zip`` is created in ``outdir`` and closed afterwards.
    :raises ValueError: if ``outdir`` does not exist.
    """
    outdir = Path(outdir)
    if not outdir.exists():
        raise ValueError(outdir.as_posix())
    close = False
    if archive:
        if isinstance(archive, Archive):
            container = archive
        else:
            container = Archive(outdir.joinpath(self.name + '.zip'), mode='w')
            # We created the archive ourselves, so we are responsible for closing it.
            close = True
    else:
        container = outdir
    # Fix: `outdir` is already a Path at this point — no need to re-wrap it.
    fname = outdir.joinpath(self.name + suffix)
    if fname.suffix in TAB_SUFFIXES:
        self.table.dialect.delimiter = '\t'
    # When writing into an archive, buffer in memory (target None) and copy
    # the serialized text into the archive afterwards.
    with UnicodeWriter(
            None if isinstance(container, Archive) else fname,
            delimiter=self.table.dialect.delimiter) as writer:
        writer.writerow(self.fields)
        for row in self.rows:
            writer.writerow(row.to_list())
    if isinstance(container, Archive):
        container.write_text(writer.read(), fname.name)
    self.table.url = fname.name
    self.metadata.write(Dataset.filename(fname, 'metadata'), container)
    ids = self._cited_sources if cited_sources_only else None
    self.sources.write(Dataset.filename(fname, 'sources'), container, ids=ids)
    if close:
        container.close()
示例2: __init__
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def __init__(self, name, default=None, **kw):
    """Initialization.

    :param name: Basename for the config file (suffix .ini will be appended).
    :param default: Default content of the config file.
    """
    self.name = name
    self.default = default
    config_dir = Path(kw.pop('config_dir', None) or DIR)
    RawConfigParser.__init__(self, kw, allow_no_value=True)
    if self.default:
        # Seed the parser with the default content; py3 wants a text stream,
        # py2 wants a byte stream.
        stream = io.StringIO(self.default) if PY3 else io.BytesIO(self.default.encode('utf8'))
        self.readfp(stream)
    ini_path = config_dir.joinpath(name + '.ini')
    if ini_path.exists():
        assert ini_path.is_file()
        self.read(ini_path.as_posix())
    else:
        if not config_dir.exists():
            try:
                config_dir.mkdir()
            except OSError:  # pragma: no cover
                # e.g. when run by a system user without a writable home dir.
                pass
        if config_dir.exists():
            # Persist the effective defaults so the user can edit them later.
            with open(ini_path.as_posix(), 'w') as fp:
                self.write(fp)
    self.path = ini_path
示例3: test_freeze
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def test_freeze(self):
    """Round-trip test: freeze the database to a zip, then unfreeze it into a
    fresh SQLite engine and compare contents with the live session."""
    from clld.scripts.freeze import freeze_func, unfreeze_func
    tmp = Path(mkdtemp())
    tmp.joinpath('data').mkdir()
    tmp.joinpath('appname').mkdir()

    class Args(object):
        # Minimal stand-in for the argparse namespace the freeze scripts expect.
        env = self.env
        module_dir = tmp.joinpath('appname').resolve()
        module = Mock(__name__='appname')

        def data_file(self, *comps):
            # freeze_func/unfreeze_func resolve dump locations through this.
            return tmp.resolve().joinpath('data', *comps)

    DBSession.flush()
    args = Args()
    freeze_func(args, dataset=Dataset.first(), with_history=False)
    self.assert_(tmp.joinpath('data.zip').exists())

    # Unfreeze into an empty in-memory database ...
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    self.assertEqual(
        engine.execute('select count(*) from language').fetchone()[0], 0)
    unfreeze_func(args, engine=engine)

    # ... and verify the restored data matches the original session.
    s1 = DBSession
    s2 = sessionmaker(bind=engine)()
    self.assertEqual(s1.query(Language).count(), s2.query(Language).count())
    l1 = s1.query(Language).filter(Language.latitude != null()).first()
    l2 = s2.query(Language).filter(Language.pk == l1.pk).first()
    self.assertEqual(l1.created, l2.created)
    self.assertEqual(l1.latitude, l2.latitude)
    self.assertEqual(l1.description, l2.description)
    contrib = s2.query(Contribution).filter(Contribution.id == 'contribution').one()
    self.assert_(contrib.primary_contributors)
    self.assert_(contrib.secondary_contributors)
    rmtree(tmp, ignore_errors=True)
示例4: create
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def create(self, dir_, content):
    """Write ``content`` to a file using ``dir_`` as file-system directory.

    Missing intermediate directories below ``dir_`` are created.

    :return: File-system path of the file that was created.
    """
    base = dir_ if isinstance(dir_, Path) else Path(dir_)
    target = base.joinpath(self.relpath)
    parent = target.parent
    if not parent.exists():
        parent.mkdir(parents=True)
    with open(target.as_posix(), 'wb') as fp:
        fp.write(content)
    return target.as_posix()
示例5: write_info
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def write_info(self, outdir=None):
    """Write the config to ``<outdir>/<fname>.ini`` and return the file path.

    On platforms whose native line separator is LF, the file is rewritten
    with CRLF line endings so the .ini files look identical everywhere.

    :param outdir: Target directory (defaults to ``self.id``); created if missing.
    :return: Path of the written .ini file.
    """
    target_dir = outdir or self.id
    if not isinstance(target_dir, Path):
        target_dir = Path(target_dir)
    if not target_dir.exists():
        target_dir.mkdir()
    fname = target_dir.joinpath(self.fname('.ini'))
    self.cfg.write(fname)
    if os.linesep == '\n':
        with fname.open(encoding='utf8') as fp:
            content = fp.read()
        with fname.open('w', encoding='utf8') as fp:
            fp.write(content.replace('\n', '\r\n'))
    return fname
示例6: WithTempDirMixin
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
class WithTempDirMixin(object):
    """
    Composable test fixture providing access to a temporary directory.

    http://nedbatchelder.com/blog/201210/multiple_inheritance_is_hard.html
    """

    def setUp(self):
        """Create a fresh temporary directory for each test."""
        super(WithTempDirMixin, self).setUp()
        tmp_dir = mkdtemp()
        self.tmp = Path(tmp_dir)

    def tearDown(self):
        """Remove the temporary directory, ignoring races/permissions."""
        rmtree(self.tmp, ignore_errors=True)
        super(WithTempDirMixin, self).tearDown()

    def tmp_path(self, *comps):
        """Return a path below the temporary directory."""
        return self.tmp.joinpath(*comps)
示例7: main
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def main(args):
    """Create and populate the lexirumah database from the data repositories.

    NOTE(review): repository locations are hard-coded relative to ``~`` and to
    the installed package — confirm these checkouts exist before running.
    """
    # Collation index so values sort in DUCET (Unicode collation) order.
    Index('ducet', collkey(common.Value.name)).create(DBSession.bind)
    repos = Path(os.path.expanduser('~')).joinpath('venvs/lexirumah/lexirumah-data')
    with transaction.manager:
        dataset = common.Dataset(
            id=lexirumah.__name__,
            name="lexirumah",
            publisher_name="Max Planck Institute for the Science of Human History",
            publisher_place="Jena",
            publisher_url="http://shh.mpg.de",
            license="http://creativecommons.org/licenses/by/4.0/",
            domain='lexirumah.model-ling.eu',
            contact='[email protected]',
            jsondata={
                'license_icon': 'cc-by.png',
                'license_name': 'Creative Commons Attribution 4.0 International License'})
        DBSession.add(dataset)

    # Glottolog and Concepticon are expected as sibling checkouts of this package.
    glottolog_repos = Path(
        lexirumah.__file__).parent.parent.parent.parent.joinpath('glottolog3', 'glottolog')
    languoids = {l.id: l for l in Glottolog(glottolog_repos).languoids()}
    concepticon = Concepticon(
        Path(lexirumah.__file__).parent.parent.parent.parent.joinpath('concepticon', 'concepticon-data'))
    conceptsets = {c.id: c for c in concepticon.conceptsets.values()}

    # `skip` supports resuming a partial import via the commented lines below.
    # NOTE(review): imports run outside the Dataset transaction above, so
    # import_cldf presumably manages its own commits — confirm.
    skip = True
    for dname in sorted(repos.joinpath('datasets').iterdir(), key=lambda p: p.name):
        #if dname.name == 'benuecongo':
        #    skip = False
        #if skip:
        #    continue
        if dname.is_dir() and dname.name != '_template':
            mdpath = dname.joinpath('cldf', 'metadata.json')
            if mdpath.exists():
                print(dname.name)
                import_cldf(dname, load(mdpath), languoids, conceptsets)

    with transaction.manager:
        # Attach family information (and an icon for isolates) to all languages.
        load_families(
            Data(),
            DBSession.query(LexiRumahLanguage),
            glottolog_repos=glottolog_repos,
            isolates_icon='tcccccc')
示例8: main
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def main(args):
    """Create and populate the lexibank database from the data repositories.

    NOTE(review): repository locations are hard-coded relative to ``~`` and to
    the installed package — confirm these checkouts exist before running.
    """
    # Collation index so values sort in DUCET (Unicode collation) order.
    Index('ducet', collkey(common.Value.name)).create(DBSession.bind)
    repos = Path(os.path.expanduser('~')).joinpath('venvs/lexibank/lexibank-data')
    with transaction.manager:
        dataset = common.Dataset(
            id=lexibank.__name__,
            name="lexibank",
            publisher_name="Max Planck Institute for the Science of Human History",
            publisher_place="Jena",
            publisher_url="http://shh.mpg.de",
            license="http://creativecommons.org/licenses/by/4.0/",
            domain='lexibank.clld.org',
            contact='[email protected]',
            jsondata={
                'license_icon': 'cc-by.png',
                'license_name': 'Creative Commons Attribution 4.0 International License'})
        DBSession.add(dataset)

    # Glottolog and Concepticon are expected as sibling checkouts of this package.
    glottolog = Glottolog(
        Path(lexibank.__file__).parent.parent.parent.parent.joinpath('glottolog3', 'glottolog'))
    languoids = {l.id: l for l in glottolog.languoids()}
    concepticon = Concepticon(
        Path(lexibank.__file__).parent.parent.parent.parent.joinpath('concepticon', 'concepticon-data'))
    conceptsets = {c['ID']: c for c in concepticon.conceptsets()}

    for dname in repos.joinpath('datasets').iterdir():
        #if dname.name not in ['acbd']:
        #    continue
        if dname.is_dir() and dname.name != '_template':
            #if dname.name != 'zenodo34092':
            #    continue
            mdpath = dname.joinpath('metadata.json')
            if mdpath.exists():
                print(dname.name)
                import_cldf(dname, load(mdpath), languoids, conceptsets)

    with transaction.manager:
        # NOTE(review): passes the languoid dict as ``glottolog`` — differs
        # from the lexirumah loader which passes a repos path; confirm the
        # expected kwarg type of load_families.
        load_families(
            Data(),
            DBSession.query(LexibankLanguage),
            glottolog=languoids,
            isolates_icon='tcccccc')
示例9: from_metadata
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def from_metadata(cls, fname):
    """Instantiate a dataset from a metadata file or a data directory.

    :param fname: Path to a CLDF metadata file, or to a data directory, in \
which case the packaged default metadata for this module is used.
    :raises ValueError: if more than one table maps to the same component type.
    :return: An instance of the best-matching module class (or of ``cls``).
    """
    fname = Path(fname)
    if fname.is_dir():
        mdname = '{0}{1}'.format(cls.__name__, MD_SUFFIX)
        tablegroup = TableGroup.from_file(pkg_path('modules', mdname))
        # Point the metadata at the data directory so that relative table
        # paths resolve correctly.
        tablegroup._fname = fname.joinpath(mdname)
    else:
        tablegroup = TableGroup.from_file(fname)
    counts = Counter()
    for table in tablegroup.tables:
        try:
            counts.update([Dataset.get_tabletype(table)])
        except ValueError:
            # Tables without a recognized component type are not counted.
            pass
    if counts and counts.most_common(1)[0][1] > 1:
        raise ValueError('{0}: duplicate components!'.format(fname))
    for mod in get_modules():
        if mod.match(tablegroup):
            return mod.cls(tablegroup)
    return cls(tablegroup)
示例10: reflexes
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def reflexes(write_stats=True, path='concepticondata'):
    """
    Collect reflexes of concept sets across all concept lists.

    :param write_stats: If True, also write statistics to ``README.md`` in the
        data directory.
    :param path: Data directory to scan (falls back to ``PKG_PATH`` if falsy).
    :return: Two dictionaries: ``D`` mapping concept set gloss to a list of
        ``(list name, label)`` pairs, and ``G`` mapping concept label to a list
        of ``(concepticon id, concepticon gloss, list name)`` triples.
    """
    D, G = {}, {}
    cpl = 0        # total number of mapped concepts across all lists
    cln = 0        # number of concept lists processed
    clb = set([])  # distinct concept labels seen
    dpath = Path(path) if path else PKG_PATH
    for i, cl in enumerate(dpath.joinpath('conceptlists').glob('*.tsv')):
        concepts = list(reader(cl, namedtuples=True, delimiter="\t"))
        # Only concepts that are mapped to a Concepticon concept set.
        for j,concept in enumerate([c for c in concepts if c.CONCEPTICON_ID]):
            label = concept.GLOSS if hasattr(concept, 'GLOSS') else concept.ENGLISH
            name = cl.name
            try:
                D[concept.CONCEPTICON_GLOSS] += [(name, label)]
            except KeyError:
                D[concept.CONCEPTICON_GLOSS] = [(name, label)]
            try:
                G[label] += [(concept.CONCEPTICON_ID, concept.CONCEPTICON_GLOSS, name)]
            except KeyError:
                G[label] = [(concept.CONCEPTICON_ID, concept.CONCEPTICON_GLOSS, name)]
            clb.add(label)
            cpl += 1
        # NOTE(review): per-list counter ({1} "concept lists" and the divisor in
        # {4} below) — reconstructed at this indentation level; confirm.
        cln += 1
    # write basic statistics and most frequent glosses
    # (assumes at least one concept list and one mapped concept exist)
    if write_stats:
        txt = """# Concepticon Statistics
* concept sets (used): {0}
* concept lists: {1}
* concept labels: {2}
* concept labels (unique): {3}
* Ø concepts per list: {4:.2f}
* Ø concepts per concept set: {5:.2f}
* Ø unique concept labels per concept set: {6:.2f}
"""
        txt = txt.format(
            len(D),
            cln,
            cpl,
            len(clb),
            cpl / cln,
            sum([len(v) for k,v in D.items()]) / len(D),
            sum([len(set([label for _,label in v])) for k,v in D.items()]) / len(D)
        )
        # Table of the 20 concept sets with the most distinct labels.
        txt += '# Twenty Most Diverse Concept Sets\n\n'
        txt += '| No. | concept set | distinct labels | concept lists | examples |\n'
        txt += '| --- | --- | --- | --- | --- |\n'
        for i,(k,v) in enumerate(sorted(D.items(), key=lambda x: len(set([label for _,label in
                x[1]])), reverse=True)[:20]):
            txt += '| {0} | {1} | {2} | {3} | {4} |\n'.format(
                i+1,
                k,
                len(set([label for _,label in v])),
                len(set([clist for clist,_ in v])),
                ', '.join(sorted(set(['«{0}»'.format(label.replace('*','`*`')) for _,label in
                    v])))
            )
        # Table of the 20 concept sets occurring in the most lists.
        txt += '# Twenty Most Frequent Concept Sets\n\n'
        txt += '| No. | concept set | distinct labels | concept lists | examples |\n'
        txt += '| --- | --- | --- | --- | --- |\n'
        for i,(k,v) in enumerate(sorted(D.items(), key=lambda x: len(set([clist for clist,_ in
                x[1]])), reverse=True)[:20]):
            txt += '| {0} | {1} | {2} | {3} | {4} |\n'.format(
                i+1,
                k,
                len(set([label for _,label in v])),
                len(set([clist for clist,_ in v])),
                ', '.join(sorted(set(['«{0}»'.format(label.replace('*','`*`')) for _,label in
                    v])))
            )
        with dpath.joinpath('README.md').open('w', encoding='utf8') as fp:
            fp.write(txt)
    return D, G
示例11: write_info
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
def write_info(self, outdir):
    """Write this object's config to ``<outdir>/<fname>.ini``.

    :param outdir: Target directory as ``Path`` or path string.
    """
    target = outdir if isinstance(outdir, Path) else Path(outdir)
    self.cfg.write(target.joinpath(self.fname('.ini')))
示例12: Dictionary
# 需要导入模块: from clldutils.path import Path [as 别名]
# 或者: from clldutils.path.Path import joinpath [as 别名]
class Dictionary(object):
def __init__(self, filename, **kw):
    """Read an SFM dictionary from ``filename``.

    :param filename: Path to the SFM file.
    :param kw: Options passed through to ``sfm.SFM.from_file``; a
        ``marker_map`` may remap markers, in which case entries are split on
        the marker that maps to ``lx``.
    """
    kw.setdefault('entry_impl', Entry)
    kw['marker_map'] = kw.get('marker_map') or {}
    # If some source marker is remapped to 'lx', split entries on that
    # source marker instead of 'lx' itself.
    inverse_map = {v: k for k, v in kw['marker_map'].items()}
    lexeme_marker = inverse_map.get('lx', 'lx')
    kw.setdefault('entry_prefix', '\\lx ')
    kw.setdefault('entry_sep', '\\%s ' % lexeme_marker)
    self.sfm = sfm.SFM.from_file(filename, **kw)
    self.dir = Path(filename).parent
#def validated(self, entry):
# entry = sfm.Dictionary.validated(self, entry)
# return entry.preprocessed()
def stats(self):
    """Collect marker statistics over all entries and print them."""
    collector = Stats()
    self.sfm.visit(collector)
    # Overall counts, then markers occurring multiple times per entry
    # (explicitly and implicitly).
    print(collector.count)
    print(collector._mult_markers)
    print(collector._implicit_mult_markers)
def process(self, outfile):
    """extract examples, etc."""
    # Never process an already-processed directory.
    assert self.dir.name != 'processed'
    self.sfm.visit(Rearrange())
    log_path = self.dir.joinpath('examples.log')
    with log_path.open('w', encoding='utf8') as log:
        extractor = ExampleExtractor(Corpus(self.dir), log)
        self.sfm.visit(extractor)
    self.sfm.write(outfile)
    # Examples go to a sibling file next to the processed output.
    extractor.write_examples(outfile.parent.joinpath('examples.sfm'))
def load(
self,
submission,
did,
lid,
comparison_meanings,
comparison_meanings_alt_labels,
marker_map):
data = Data()
rel = []
vocab = models.Dictionary.get(did)
lang = models.Variety.get(lid)
for ex in Examples.from_file(self.dir.joinpath('examples.sfm')):
data.add(
common.Sentence,
ex.id,
id=ex.id,
name=ex.text,
language=lang,
analyzed=ex.morphemes,
gloss=ex.gloss,
description=ex.translation)
for i, entry in enumerate(self.sfm):
words = list(entry.get_words())
headword = None
for j, word in enumerate(words):
if not word.meanings:
print('no meanings for word %s' % word.form)
continue
if not headword:
headword = word.id
else:
rel.append((word.id, 'sub', headword))
for tw in word.rel:
rel.append((word.id, tw[0], tw[1]))
w = data.add(
models.Word,
word.id,
id='%s-%s-%s' % (submission.id, i + 1, j + 1),
name=word.form,
number=int(word.hm) if word.hm else 0,
phonetic=word.ph,
pos=word.ps,
dictionary=vocab,
language=lang)
DBSession.flush()
concepts = []
for k, meaning in enumerate(word.meanings):
if not (meaning.ge or meaning.de):
print('meaning without description for word %s' % w.name)
continue
if meaning.ge:
meaning.ge = meaning.ge.replace('.', ' ')
#.........这里部分代码省略.........