This article collects typical usage examples of the BibDatabase class from Python's bibtexparser.bibdatabase module. If you are wondering what BibDatabase is for or how to use it, the curated examples here should help.
Below are 15 code examples of the BibDatabase class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
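Before turning to the examples, here is a minimal sketch of the typical BibDatabase workflow with bibtexparser: build a database in memory, fill in its entries list, and serialize it with bibtexparser.dumps. The entry fields below are illustrative placeholders only.

import bibtexparser
from bibtexparser.bibdatabase import BibDatabase

# Each entry is a plain dict; 'ID' and 'ENTRYTYPE' are mandatory keys.
db = BibDatabase()
db.entries = [{
    'ID': 'doe2020example',      # placeholder citation key
    'ENTRYTYPE': 'article',
    'author': 'Doe, Jane',
    'title': 'An Example Title',
    'year': '2020',
}]

# Serialize the in-memory database to a BibTeX string.
print(bibtexparser.dumps(db))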
Example 1: test_multiple_string_write
def test_multiple_string_write(self):
    bib_database = BibDatabase()
    bib_database.strings['name1'] = 'value1'
    bib_database.strings['name2'] = 'value2'  # Order is important!
    result = bibtexparser.dumps(bib_database)
    expected = '@string{name1 = "value1"}\n\n@string{name2 = "value2"}\n\n'
    self.assertEqual(result, expected)
Example 2: save_citation
def save_citation(citation_record):
    cite_anchor = citation_record.find('a', {'class': 'gs_nph', 'href': '#', "role": "button"})
    if not cite_anchor or not cite_anchor['onclick']:
        logging.warn("No Cite anchor for citation: %s" % citation_record)
        return
    citation_id = cite_anchor['onclick'].split(',')[1][1:-1]
    logging.info("Getting formatted cite from citation id: " + citation_id)
    params = {"q": "info:%s:scholar.google.com/" % citation_id, "output": "cite"}
    soup = create_soup_by_url("https://scholar.google.com/scholar", params)
    bib_anchor = soup.find('a', {"class": "gs_citi"})
    if not bib_anchor:
        logging.debug("BibTex page soup is: %s" % soup.getText())
        logging.warn("No BibTex citation provided for citation: %s" % citation_id)
        return
    soup = create_soup_by_url(bib_anchor['href'])
    global citation_num
    citation_num += 1
    # Adding a tag to the bib entry about the google scholar citation ID
    citation_entry = bibtexparser.loads(soup.getText()).entries[0]
    citationID = citation_entry['ID']  # e.g., melville2004review
    citation_entry["gscholar_id"] = citation_id
    db_entry = []
    db_entry.append(citation_entry)
    db = BibDatabase()
    db.entries = db_entry
    g_bib_entry = bibtexparser.dumps(db)
    bib_entry = "%% [%d]\n%s" % (citation_num, g_bib_entry)
    logging.info(bib_entry.strip())
    with open(opts.citation_name, "a+") as f:
        f.write(bib_entry.encode('utf-8'))
    if opts.should_download:
        pdf_div = citation_record.find('div', {"class": "gs_ggs gs_fl"})
        if pdf_div:
            download_pdf(pdf_div.a['href'], citationID)
Example 3: merge
def merge(entry1, entry2):
    db = BibDatabase()
    entries = {}
    keys1 = entry1.keys()
    keys2 = entry2.keys()
    intersection = intersect(keys1, keys2)
    union = get_union(keys1, keys2)
    not_intersect = not_intersection(union, intersection)
    # The two entries have the same keys, so everything needs to be merged
    if not not_intersect:
        for key in keys1:
            if key == 'author':
                author = merge_author(entry1[key], entry1['author_norm'], entry2[key], entry2['author_norm'])
                author_norm = normalize_author(str(author))
                entries = add_field(entries, key, author)
                entries = add_field(entries, 'author_norm', author_norm)
            if key == 'editor':
                editor = merge_author(entry1[key], entry1['editor_norm'], entry2[key], entry2['editor_norm'])
                editor_norm = normalize_author(str(editor))
                entries = add_field(entries, key, editor)
                entries = add_field(entries, 'editor_norm', editor_norm)
            elif key == 'keywords' or key == 'topics':
                entries = add_field(entries, key, merge_keywords(entry1[key], entry2[key]))
            elif key == 'month':
                entries = add_field(entries, key, entry1[key])
            elif len(entry1[key]) == len(entry2[key]) or len(entry1[key]) < len(entry2[key]):
                entries = add_field(entries, key, entry2[key])
            else:
                entries = add_field(entries, key, entry1[key])
    else:
        # The keys in the two entries aren't all the same, so some need to be merged
        # and some can just be written as-is
        #print "Entries are not the same!"
        #print keys1, keys2
        for key in intersection:
            if key == 'author':
                author = merge_author(entry1[key], entry1['author_norm'], entry2[key], entry2['author_norm'])
                entries = add_field(entries, key, author)
            if key == 'editor':
                editor = merge_author(entry1[key], entry1['editor_norm'], entry2[key], entry2['editor_norm'])
                entries = add_field(entries, key, editor)
            elif key == 'keywords' or key == 'topics':
                entries = add_field(entries, key, merge_keywords(entry1[key], entry2[key]))
            elif key == 'month':
                entries = add_field(entries, key, entry1[key])
            elif key == 'doi':
                entries = add_field(entries, get_keycount(intersection, key), entry1[key])
            elif len(entry1[key]) == len(entry2[key]) or len(entry1[key]) < len(entry2[key]):
                entries = add_field(entries, key, entry2[key])
            else:
                entries = add_field(entries, key, entry1[key])
        for key in not_intersect:
            if key in keys1:
                entries = add_field(entries, key, entry1[key])
            elif key in keys2:
                entries = add_field(entries, key, entry2[key])
    db.entries = [entries]
    return db
Example 4: exif_pdf
def exif_pdf(self, filename):
    fields = ["Author", "Year", "Journal", "Title", "Publisher",
              "Page", "Address", "Annote", "Booktitle", "Chapter",
              "Crossred", "Edition", "Editor", "HowPublished",
              "Institution", "Month", "Note", "Number",
              "Organization", "Pages", "School",
              "Series", "Type", "Url", "Volume", "Doi", "File"]
    op = pexif.get_json(filename)
    try:
        new_op = {
            field: str(value) for field in fields
            for key, value in op[0].items() if field.lower() in key.lower()
        }
        if 'Author' not in new_op:
            new_op['Author'] = 'Unknown'
        id_auth = new_op["Author"].split()[-1]
        id_tit = new_op["Title"].split()[:2]
        id_tit.append(id_auth)
        id_val = "_".join(id_tit)
        new_op["ID"] = str(id_val)
        new_op["ENTRYTYPE"] = "article"
        op[0] = new_op
        db = BibDatabase()
        db.entries = op
        writer = BibTexWriter()
        pdf_buff = writer.write(db)
        self.create_textview(pdf_buff)
    except:
        self.Messages.on_error_clicked("Can't extract data from this pdf file", "Try other methods")
Example 5: merge_folder_tree
def merge_folder_tree(folder, use_backup):
    """
    Merge bib files from the current subtree into a master bib file at the root.

    This function updates the 'file' link of each entry with the relative path
    to each subfolder that has been processed.

    Args:
        folder (str): relative or absolute path of the folder to process.
        use_backup (bool): whether to back up an existing `master.bib` before overwriting it.

    Returns:
        Nothing, but creates a file named `master.bib` in the given folder.
    """
    db = BibDatabase()
    for subdir, _dirs, _files in os.walk(os.path.abspath(folder)):
        if os.path.exists(os.path.join(subdir, '.nobib')):
            continue  # Skip blacklisted folders
        reldir = os.path.relpath(subdir, os.path.abspath(folder))
        bib_path = os.path.join(subdir, 'biblio.bib')
        subdb = utils.read_bib_file(bib_path)
        for entry in subdb.entries:
            filename = utils.decode_filename_field(entry['file'])
            filename = os.path.join(reldir, filename)
            entry['file'] = utils.encode_filename_field(filename)
        db.entries += subdb.entries
    # Remove duplicated entries
    entries_dict = db.entries_dict
    db.entries = [val for key, val in entries_dict.items()]
    # Write result
    bib_path = os.path.join(folder, 'master.bib')
    utils.write_with_backup(bib_path, utils.write_bib(db, order=True), use_backup)
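The deduplication step above leans on BibDatabase.entries_dict, which maps citation IDs to entries, so entries that share an ID collapse to a single one. A minimal sketch of that idea in isolation (the entries below are made-up placeholders):

from bibtexparser.bibdatabase import BibDatabase

db = BibDatabase()
db.entries = [
    {'ID': 'smith2019', 'ENTRYTYPE': 'article', 'title': 'First copy'},
    {'ID': 'smith2019', 'ENTRYTYPE': 'article', 'title': 'Second copy'},
    {'ID': 'jones2021', 'ENTRYTYPE': 'book', 'title': 'Unique entry'},
]

# entries_dict is keyed by citation ID, so duplicate IDs collapse to one entry.
db.entries = list(db.entries_dict.values())
print(len(db.entries))  # 2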
Example 6: format_paper_citation_dict
def format_paper_citation_dict(citation, indent=' '):
    """
    Format a citation dict for a paper or a list of papers into a BibTeX
    record string.

    :param citation: A ``Paper`` citation dict or list of such dicts.
    :param indent: Indentation to be used in BibTeX output.
    """
    if isinstance(citation, dict):
        entries = [citation]
    else:
        entries = citation
    # Handle conflicting ids for entries
    entries_ids = collections.defaultdict(lambda: 0)
    for entry in entries:
        entry_id = entry['ID']
        entries_ids[entry_id] += 1
        if entries_ids[entry_id] > 1:
            entry['ID'] = '%s_%s' % (entry_id, entries_ids[entry_id])
    writer = BibTexWriter()
    writer.indent = indent
    with io.StringIO('') as bibfile:
        db = BibDatabase()
        db.entries = entries
        bibfile.write(writer.write(db))
        return bibfile.getvalue().strip()
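As a rough usage sketch of the helper above (the paper dicts are hypothetical placeholders), note how a duplicated citation key is given a numeric suffix by the conflict-handling loop before the BibTeX is written:

papers = [
    {'ENTRYTYPE': 'article', 'ID': 'doe2020', 'title': 'Paper one', 'author': 'Doe, J.'},
    {'ENTRYTYPE': 'article', 'ID': 'doe2020', 'title': 'Paper two', 'author': 'Doe, J.'},
]
# The second entry is emitted with the key 'doe2020_2'.
print(format_paper_citation_dict(papers))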
Example 7: test_write_dependent_strings
def test_write_dependent_strings(self):
    bib_database = BibDatabase()
    bib_database.strings['title'] = 'Mr'
    expr = BibDataStringExpression([BibDataString(bib_database, 'title'), 'Smith'])
    bib_database.strings['name'] = expr
    result = bibtexparser.dumps(bib_database)
    expected = '@string{title = {Mr}}\n\n@string{name = title # {Smith}}\n\n'
    self.assertEqual(result, expected)
Example 8: test_write_common_strings
def test_write_common_strings(self):
    bib_database = BibDatabase()
    bib_database.load_common_strings()
    writer = BibTexWriter(write_common_strings=True)
    result = bibtexparser.dumps(bib_database, writer=writer)
    with io.open('bibtexparser/tests/data/common_strings.bib') as f:
        expected = f.read()
    self.assertEqual(result, expected)
Example 9: test_align
def test_align(self):
    bib_database = BibDatabase()
    bib_database.entries = [{'ID': 'abc123',
                             'ENTRYTYPE': 'book',
                             'author': 'test',
                             'thisisaverylongkey': 'longvalue'}]
    writer = BibTexWriter()
    writer.align_values = True
    result = bibtexparser.dumps(bib_database, writer)
    expected = \
"""@book{abc123,
 author             = {test},
 thisisaverylongkey = {longvalue}
}

"""
    self.assertEqual(result, expected)

    with open('bibtexparser/tests/data/multiple_entries_and_comments.bib') as bibtex_file:
        bib_database = bibtexparser.load(bibtex_file)
    writer = BibTexWriter()
    writer.contents = ['entries']
    writer.align_values = True
    result = bibtexparser.dumps(bib_database, writer)
    expected = \
"""@book{Toto3000,
 author = {Toto, A and Titi, B},
 title  = {A title}
}

@article{Wigner1938,
 author    = {Wigner, E.},
 doi       = {10.1039/TF9383400029},
 issn      = {0014-7672},
 journal   = {Trans. Faraday Soc.},
 owner     = {fr},
 pages     = {29--41},
 publisher = {The Royal Society of Chemistry},
 title     = {The transition state method},
 volume    = {34},
 year      = {1938}
}

@book{Yablon2005,
 author    = {Yablon, A.D.},
 publisher = {Springer},
 title     = {Optical fiber fusion slicing},
 year      = {2005}
}

"""
    self.assertEqual(result, expected)
Example 10: write_selected_to_file
def write_selected_to_file(selected):
    db = BibDatabase()
    result = []
    for item in selected:
        path = str(bib_dir) + str(files[item])
        with open(path, 'r') as f:
            db = bibtexparser.load(f)
            result.append(db.entries[0])
    db.entries = result
    print db.entries
    with open(website_dir, 'w') as f:
        bibtexparser.dump(db, f)
    subprocess.call(['bib2html', '-f', website_dir])
Example 11: test_entry_separator
def test_entry_separator(self):
    bib_database = BibDatabase()
    bib_database.entries = [{'ID': 'abc123',
                             'ENTRYTYPE': 'book',
                             'author': 'test'}]
    writer = BibTexWriter()
    writer.entry_separator = ''
    result = bibtexparser.dumps(bib_database, writer)
    expected = \
"""@book{abc123,
 author = {test}
}
"""
    self.assertEqual(result, expected)
Example 12: test_sort_missing_field
def test_sort_missing_field(self):
    bib_database = BibDatabase()
    bib_database.entries = [{'ID': 'b',
                             'ENTRYTYPE': 'article',
                             'year': '2000'},
                            {'ID': 'c',
                             'ENTRYTYPE': 'book',
                             'year': '2010'},
                            {'ID': 'a',
                             'ENTRYTYPE': 'book'}]
    writer = BibTexWriter()
    writer.order_entries_by = ('year', )
    result = bibtexparser.dumps(bib_database, writer)
    expected = "@book{a\n}\n\n@article{b,\n year = {2000}\n}\n\n@book{c,\n year = {2010}\n}\n\n"
    self.assertEqual(result, expected)
Example 13: test_indent
def test_indent(self):
    bib_database = BibDatabase()
    bib_database.entries = [{'id': 'abc123',
                             'type': 'book',
                             'author': 'test'}]
    writer = BibTexWriter()
    writer.indent = ' '
    result = bibtexparser.dumps(bib_database, writer)
    expected = \
"""@book{abc123,
 author = {test}
}

"""
    self.assertEqual(result, expected)
Example 14: __str__
def __str__(self):
    bib = BibDatabase()
    bib.entries = [{
        'ENTRYTYPE': 'article',
        'ID': self.entry_number,
        'author': self.author,
        'journal': self.journal,
        'title': self.title,
        'year': self.year,
        'volume': self.volume,
        'number': self.number,
        'pages': self.pages,
        'abstract': self.abstract,
        'keyword': self.keyword,
        'doi': self.doi,
        'issn': self.issn
    }]
    return bibtexparser.dumps(bib)
Example 15: __init__
def __init__(self, data=None,
             customization=None,
             ignore_nonstandard_types=True,
             homogenize_fields=False,
             interpolate_strings=True,
             common_strings=False):
    """
    Creates a parser for reading BibTeX files.

    :return: parser
    :rtype: `BibTexParser`
    """
    self.bib_database = BibDatabase()
    #: Load common strings such as month abbreviations.
    #: Default: `False`.
    self.common_strings = common_strings
    if self.common_strings:
        self.bib_database.load_common_strings()
    #: Callback function to process BibTeX entries after parsing,
    #: for example to create a list from a string with multiple values.
    #: By default all BibTeX values are treated as simple strings.
    #: Default: `None`.
    self.customization = customization
    #: Ignore non-standard BibTeX entry types (standard types are `book`, `article`, etc.).
    #: Default: `True`.
    self.ignore_nonstandard_types = ignore_nonstandard_types
    #: Sanitize BibTeX field names, for example change `link` to `url`, etc.
    #: Field names are always converted to lowercase.
    #: Default: `False`.
    self.homogenize_fields = homogenize_fields
    #: Interpolate BibTeX strings or keep the expression structure.
    self.interpolate_strings = interpolate_strings
    # On some sample data files, character encoding detection simply hangs,
    # so we default to utf8 and mandate it.
    self.encoding = 'utf8'
    # Pre-defined set of key changes
    self.alt_dict = {
        'keyw': u'keyword',
        'keywords': u'keyword',
        'authors': u'author',
        'editors': u'editor',
        'urls': u'url',
        'link': u'url',
        'links': u'url',
        'subjects': u'subject'
    }
    # Set up the parser expressions
    self._init_expressions()
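For reference, a minimal sketch of how a parser configured this way is typically driven through bibtexparser's top-level API (the BibTeX snippet is a placeholder):

import bibtexparser
from bibtexparser.bparser import BibTexParser

sample = """
@article{doe2020example,
  author = {Doe, Jane},
  title  = {An Example Title},
  year   = {2020}
}
"""

# common_strings=True lets month abbreviations and similar @string macros resolve.
parser = BibTexParser(common_strings=True)
db = bibtexparser.loads(sample, parser=parser)
print(db.entries[0]['ID'])  # doe2020example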