本文整理汇总了Python中quanthistling.model.meta.Session.close方法的典型用法代码示例。如果您正苦于以下问题：Python Session.close方法的具体用法？Python Session.close怎么用？Python Session.close使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类quanthistling.model.meta.Session的用法示例。
在下文中一共展示了Session.close方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
page_found = True
if page_new != page:
page_change = True
log.info("Page is now: " + str(page_new))
elif re.match(r'^\[BILD\]', l):
pass
# parse data
elif (page_new >= data['startpage']) and (page_new <= data['endpage']):
# new entry starts, process previous entry
if (re.search(r'<subentry/>', l) or re.search(r'<mainentry/>', l)) and current_entry_text != '':
current_entry_text = re.sub(r'[\f\n]*$', '', current_entry_text)
entry = importfunctions.process_line(current_entry_text)
# add additional entry data
entry.startpage = current_entry_start_page
entry.endpage = page
entry.startcolumn = current_entry_start_column
entry.endcolumn = column
entry.pos_on_page = current_entry_pos_on_page
entry.dictdata = dictdata
entry.book = book
entry.is_subentry = is_subentry
entry.is_subentry_of_entry_id = current_mainentry_id
Session.add(entry)
pos_on_page = pos_on_page + 1
if not is_subentry:
# add start letter
#if (entry.head != None) and (entry.head != ''):
# startletters.add(entry.head[0].lower())
# set new main entry id
Session.commit()
current_mainentry_id = entry.id
# only change page and column after processing possible new entry at page
# and column start
if page_change:
page = page_new
page_change = False
pos_on_page = 1
if column_change:
column = column_new
column_change = False
# line is start of a subentry
if re.search(r'<subentry/>', l):
is_subentry = True
l = re.sub(r'<subentry/>', '', l)
current_entry_text = ''
current_entry_start_page = page
current_entry_page = page
current_entry_start_column = column
current_entry_column = column
current_entry_pos_on_page = pos_on_page
# line is start of a main entry
elif re.search(r'<mainentry/>', l):
is_subentry = False
l = re.sub(r'<mainentry/>', '', l)
current_mainentry_id = 0
current_entry_text = ''
current_entry_start_page = page
current_entry_page = page
current_entry_start_column = column
current_entry_column = column
current_entry_pos_on_page = pos_on_page
# add page break
if page != current_entry_page:
current_entry_text = current_entry_text + "\f"
current_entry_page = page
current_entry_column = column
elif column != current_entry_column:
current_entry_text = current_entry_text + "\f"
current_entry_column = column
# add current line to current entry
current_entry_text = current_entry_text + l + "\n"
# Add last entry from file
current_entry_text = re.sub(r'[\f\n]*$', '', current_entry_text)
entry = importfunctions.process_line(current_entry_text)
# add additional entry data
entry.startpage = current_entry_start_page
entry.endpage = page
entry.startcolumn = current_entry_start_column
entry.endcolumn = column
entry.pos_on_page = current_entry_pos_on_page
entry.dictdata = dictdata
entry.is_subentry = is_subentry
entry.is_subentry_of_entry_id = current_mainentry_id
Session.add(entry)
f1.close()
#dictdata.startletters = unicode(repr(sorted(list(startletters))))
Session.commit()
Session.commit()
Session.close()
示例2: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
for tb in files:
importfunctions.delete_book_from_db(Session, tb['bibtex_key'])
book = importfunctions.insert_book_to_db(Session, tb)
for data in tb['dictdata']:
dictdata = importfunctions.insert_dictdata_to_db(Session, data, book)
print("Processing file {0}.".format(data['file']))
encoding = "utf-8"
if "encoding" in data:
encoding = data["encoding"]
with codecs.open(os.path.join(dictdata_path, data['file']), "r", encoding) as f:
while f.next().strip():
pass
entry_string = ""
i = 1
for line in f:
line = importfunctions.normalize_stroke(line)
line = unicodedata.normalize("NFD", line)
if line.strip():
entry_string += line
else:
if "charmapping" in data:
for a in data["applycharmappingon"]:
match_a = re.search("\\\\{0}[^\n]*\n".format(a), entry_string)
if match_a:
string = match_a.group(0)
for char_original in data["charmapping"]:
string = re.sub(char_original, data["charmapping"][char_original], string)
entry_string = entry_string[:match_a.start(0)] + string + entry_string[match_a.end(0):]
entry = model.Entry()
annotations = []
annotations_subentry = []
fullentry = ""
for char in entry_string:
if char == '\n':
pos = len(fullentry)
annotations.append([pos, pos, u'newline', u'pagelayout'])
elif char == '\t':
pos = len(fullentry)
annotations.append([pos, pos, u'tab', u'pagelayout'])
elif char == "\r":
pass
else:
fullentry = fullentry + char
sorted_newlines = sorted([ a[0] for a in annotations if a[2] == "newline" ])
entry.fullentry = fullentry
for a in data['annotations']:
for match_a in re.finditer("\\\\{0} ".format(a), fullentry):
a_start = match_a.end(0)
a_end = next(x for x in sorted_newlines if x > a_start)
if data['annotations'][a].startswith("subentry-"):
annotations_subentry.append([a_start, a_end, data['annotations'][a][9:], u'dictinterpretation'])
else:
annotations.append([a_start, a_end, data['annotations'][a], u'dictinterpretation'])
annotations = clean_and_split_translations(fullentry, annotations)
for a in annotations:
entry.append_annotation(a[0], a[1], a[2], a[3])
entry.startpage = 1
entry.endpage = 1
entry.startcolumn = 1
entry.endcolumn = 1
entry.pos_on_page = i
entry.dictdata = dictdata
entry.book = book
entry.is_subentry = False
entry.is_subentry_of_entry_id = None
Session.add(entry)
i += 1
if annotations_subentry:
annotations_subentry = clean_and_split_translations(fullentry, annotations_subentry)
Session.commit()
subentry = model.Entry()
subentry.fullentry = fullentry
for a in annotations_subentry:
subentry.append_annotation(a[0], a[1], a[2], a[3])
subentry.startpage = 1
subentry.endpage = 1
subentry.startcolumn = 1
subentry.endcolumn = 1
subentry.pos_on_page = i
subentry.dictdata = dictdata
entry.book = book
subentry.is_subentry = True
subentry.is_subentry_of_entry_id = entry.id
Session.add(subentry)
i += 1
entry_string = ""
Session.commit()
Session.close()
示例3: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
def main(argv):
if len(argv) < 2:
print "call: importdictdata.py ini_file"
exit(1)
ini_file = argv[1]
dictdata_path = 'quanthistling/dictdata'
log = logging.getLogger()
logging.basicConfig(level=logging.INFO)
conf = appconfig('config:' + ini_file, relative_to='.')
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
Session.query(model.Annotation).delete()
Session.query(model.WordlistAnnotation).delete()
Session.query(model.Entry).delete()
Session.query(model.WordlistEntry).delete()
Session.query(model.Wordlistdata).delete()
# delete languages
Session.query(model.LanguageTgt).delete()
Session.query(model.LanguageSrc).delete()
Session.query(model.LanguageIso).delete()
Session.query(model.LanguageBookname).delete()
#Session.query(model.LanguageWordlistdata).delete()
Session.query(model.Dictdata).delete()
Session.query(model.Nondictdata).delete()
Session.query(model.Book).delete()
Session.query(model.Component).delete()
# delete annotationtypes
Session.query(model.Annotationtype).delete()
# insert languages
for l in quanthistling.dictdata.languages.list:
language = model.LanguageIso()
language.name = l['name']
language.langcode = l['langcode']
language.description = l['description']
language.url = l['url']
Session.add(language)
Session.commit()
log.info("Inserted language " + l['name'] + ".")
# insert components
for c in quanthistling.dictdata.components.list:
component = model.Component()
component.name = c['name']
component.description = c['description']
Session.add(component)
Session.commit()
log.info("Inserted component " + c['name'] + ".")
# insert annotationtypes
for at in quanthistling.dictdata.annotationtypes.list:
annotationtype = model.Annotationtype()
annotationtype.type = at['type']
annotationtype.description = at['description']
Session.add(annotationtype)
Session.commit()
log.info("Inserted annotationtype " + at['type'] + ".")
Session.close()
for b in quanthistling.dictdata.books.list:
importbook.main(['importbook.py', b['bibtex_key'], ini_file])
if os.path.exists(
os.path.join(
"scripts", "annotations", "annotations_for_{0}.py".format(
b['bibtex_key']))):
log.info("Parsing annotations for " + b['bibtex_key'] + "...")
eval("annotations_for_%s.main(['annotations_for_%s.py', ini_file])" % (b['bibtex_key'], b['bibtex_key']))
else:
log.info("No annotation script found for " + b['bibtex_key'] + "...")
#for b in quanthistling.dictdata.wordlistbooks.list:
importhuber1992.main(['importhuber1992.py', ini_file])
importzgraggen1980.main(['importzgraggen1980.py', ini_file])
importtoolboxfile.main(['importtoolboxfile.py', 'all', ini_file])
示例4: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
elif len(parts) == 2:
if parts[1] in languages_list:
fullentry = ""
else:
fullentry = parts[1]
else:
continue
fullentry = re.sub("^-- ", "", fullentry)
entry[parts[0]] = {}
annotation[parts[0]] = []
entry[parts[0]]['fullentry'] = l
entry[parts[0]]['pos_on_page'] = pos_on_page
start_entry = len(l) - len(fullentry)
end_entry = len(l)
start_new = 0
if len(fullentry) > 0:
for match in re.finditer(u"(?:[,;] |$)", fullentry):
mybreak = False
# are we in a bracket?
for m in re.finditer(r'\(.*?\)', fullentry):
if match.start(0) > m.start(0) and match.end(0) < m.end(0):
mybreak = True
if not mybreak:
end_new = match.start(0)
match_bracket = re.search(" ?\(([^)]*)\) ?$", fullentry[start_new:end_new])
if match_bracket:
# if there is a number in the bracket then remove it
if re.search("(?:[\dmfv]|pl|sg)", match_bracket.group(1)):
end_new = end_new - len(match_bracket.group(0))
match_dashes1 = re.search("^--? ?", fullentry[start_new:end_new])
if match_dashes1:
start_new = start_new + len(match_dashes1.group(0))
match_dashes2 = re.search("--?,?$", fullentry[start_new:end_new])
if match_dashes2:
end_new = end_new - len(match_dashes2.group(0))
match_bracket2 = re.search("\(([^)]*)\)", fullentry[start_new:end_new])
#if concept == 'TAIL' and parts[0] == 'C25':
# print
# print fullentry[start_new:end_new].encode("utf-8")
# print annotation[parts[0]]
# print
if match_bracket2:
a = {}
a['start'] = start_entry + start_new
a['end'] = start_entry + end_new
a['value'] = 'counterpart'
a['type'] = 'dictinterpretation'
annotation_string = fullentry[start_new:start_new+match_bracket2.start(0)] + match_bracket2.group(1) + fullentry[start_new + match_bracket2.end(0):end_new]
annotation_string = re_html.sub("", annotation_string)
annotation_string = re_singledash.sub("", annotation_string)
#if concept == 'TAIL' and parts[0] == 'C25':
# print annotation_string
a['string'] = annotation_string
annotation[parts[0]].append(a)
a2 = {}
a2['start'] = start_entry + start_new
a2['end'] = start_entry + end_new
a2['value'] = 'counterpart'
a2['type'] = 'dictinterpretation'
annotation_string = fullentry[start_new:start_new+match_bracket2.start(0)] + fullentry[start_new+match_bracket2.end(0):end_new]
annotation_string = re_html.sub("", annotation_string)
annotation_string = re_singledash.sub("", annotation_string)
a2['string'] = annotation_string
annotation[parts[0]].append(a2)
else:
a = {}
a['start'] = start_entry + start_new
a['end'] = start_entry + end_new
a['value'] = 'counterpart'
a['type'] = 'dictinterpretation'
annotation_string = fullentry[start_new:end_new]
annotation_string = re_html.sub("", annotation_string)
annotation_string = re_singledash.sub("", annotation_string)
a['string'] = annotation_string
annotation[parts[0]].append(a)
start_new = match.end(0)
pos_on_page += 1
Session.commit()
wordlistfile.close()
Session.close()
示例5: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
Session, data, book)
wordlistfile = codecs.open(os.path.join(
dictdata_path, wordlistbookdata["file"]), "r", "utf-8")
page = 0
pos_on_page = 1
counterparts = {}
part = None
lang = None
for line in wordlistfile:
l = line.strip()
if re.search(u'^<p>', l):
l = re.sub(u'</?p>', '', l)
match_part = re.search("^<i>([^\.]+)\.</i>$", l)
if match_part:
part = match_part.group(1)
continue
lang = unicodedata.normalize("NFD", data["language_bookname"])
if re_page.match(l):
match_page = re_page.match(l)
page = int(match_page.group(1))
pos_on_page = 1
if page >= int(data["startpage"]) and \
page <= int(data["endpage"]):
print "Parsing page {0}".format(page)
else:
if page >= int(data["startpage"]) and \
page <= int(data["endpage"]):
if not part == lang and lang != "Portuguese":
continue
parts = l.split("\t")
if len(parts) != 4 and len(parts) != 2:
print("wrong number of tabs on page {0} pos {1}".format(page, pos_on_page))
print(l)
print(parts)
continue
for i in range(len(parts) / 2):
concept = conceptid_from_string(parts[i*2])
if concept == u"":
print("No concept")
print(l)
print(parts)
continue
parts_index = i*2+1
if data["language_name"] == u"Portuguese":
parts_index = i*2
counterpart = parts[parts_index]
counterparts[concept] = \
(counterpart, page, parts_index + 1,
pos_on_page + parts_index)
pos_on_page += len(parts)
print(" DONE language {0}.".format(lang.encode("utf-8")))
# store concepts
if data["language_name"] == u"Portuguese":
for concept_id, _ in counterparts.items():
# add concept to db if it is not there
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(
concept=concept_id).first()
if concept_db == None:
concept_db = model.WordlistConcept()
concept_db.concept = concept_id
Session.add(concept_db)
Session.commit()
# store entries
for concept_id, v in counterparts.items():
entry_db = importfunctions.process_line(v[0], "wordlist")
entry_db.wordlistdata = wordlistdata
entry_db.pos_on_page = v[3]
entry_db.startpage = v[1]
entry_db.endpage = v[1]
entry_db.startcolumn = v[2]
entry_db.endcolumn = v[2]
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(concept=concept_id).first()
if concept_db is None:
print(u"Concept not found: {0}".format(concept_id))
entry_db.concept = concept_db
inserted = insert_counterpart(entry_db, 0, len(v[0]), data)
Session.commit()
Session.close()
示例6: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
for b in quanthistling.dictdata.wordlistbooks.list:
if b['bibtex_key'] == book_bibtex_key:
wordlistbookdata = b
importfunctions.delete_book_from_db(Session, book_bibtex_key)
book = importfunctions.insert_book_to_db(Session, wordlistbookdata)
concepts = {}
print "Parsing {0}...".format(book_bibtex_key)
for data in wordlistbookdata["nonwordlistdata"]:
nonwordlistdata = importfunctions.insert_nonwordlistdata_to_db(
Session, data, book, os.path.join(dictdata_path, data['file']))
for data in wordlistbookdata["wordlistdata"]:
wordlistdata = importfunctions.insert_wordlistdata_to_db(
Session, data, book)
wordlistfile = codecs.open(os.path.join(
dictdata_path, wordlistbookdata["file"]), "r", "utf-8")
page = 0
pos_on_page = 1
counterparts = {}
for line in wordlistfile:
l = line.strip()
if re.search(u'^<p>', l):
l = re.sub(u'</?p>', '', l)
if re_page.match(l):
match_page = re_page.match(l)
page = int(match_page.group(1))
pos_on_page = 1
print "Parsing page {0}".format(page)
match_number = re.match("(\d{1,3})\. ?", l)
if match_number:
concept_nr = int(match_number.group(1))
l = l[len(match_number.group(0)):]
parts = l.split("\t")
parts_index = map_tabs[data["language_bookname"]]
if parts_index < len(parts):
c = parts[parts_index]
counterparts[concept_nr] = \
(c, page, parts_index + 1,
pos_on_page + parts_index)
pos_on_page += len(parts)
# store concepts
if data["language_name"] == u"Portuguese":
for k, v in counterparts.items():
concept = v[0].upper()
concept = re.sub(u" ", u"_", concept)
concept = re.sub(u"'", u"_", concept)
concept = re.sub(u"/", u"_", concept)
concept = re.sub(u"\(", u"", concept)
concept = re.sub(u"\)", u"", concept)
concept = re.sub(u"\?", u"", concept)
concept_id = u"{0}".format(concept)
concepts[k] = concept_id
# add concept to db if it is not there
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(
concept=concept_id).first()
if concept_db == None:
concept_db = model.WordlistConcept()
concept_db.concept = concept_id
Session.add(concept_db)
Session.commit()
# store entries
for k, v in counterparts.items():
entry_db = importfunctions.process_line(v[0], "wordlist")
entry_db.wordlistdata = wordlistdata
entry_db.pos_on_page = v[3]
entry_db.startpage = v[1]
entry_db.endpage = v[1]
entry_db.startcolumn = v[2]
entry_db.endcolumn = v[2]
concept_id = concepts[k]
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(concept=concept_id).first()
if concept_db is None:
print("Concept not found: {0}".format(concept_id))
entry_db.concept = concept_db
inserted = insert_counterpart(entry_db, 0, len(c), data)
Session.commit()
Session.close()
示例7: main
# 需要导入模块: from quanthistling.model.meta import Session [as 别名]
# 或者: from quanthistling.model.meta.Session import close [as 别名]
#.........这里部分代码省略.........
for b in quanthistling.dictdata.wordlistbooks.list:
if b["bibtex_key"] == book_bibtex_key:
wordlistbookdata = b
wordlistbookdata['bibtex_key'] = combined_bibtex_key
for data in wordlistbookdata["nonwordlistdata"]:
data["volume"] = volume
nonwordlistdata = importfunctions.insert_nonwordlistdata_to_db(
Session, data, book, os.path.join(dictdata_path, data['file']))
for data in wordlistbookdata["wordlistdata"]:
wordlistdata = importfunctions.insert_wordlistdata_to_db(
Session, data, book)
wordlistfile = codecs.open(os.path.join(
dictdata_path, wordlistbookdata["file"]), "r", "utf-8")
page = 0
pos_on_page = 1
counterparts = {}
for line in wordlistfile:
l = line.strip()
l = substitute_characters(data["language_name"], l)
if re.search(u'^<p>', l):
l = re.sub(u'</?p>', '', l)
if re_page.match(l):
match_page = re_page.match(l)
page = int(match_page.group(1))
pos_on_page = 1
if page >= int(data["startpage"]) and \
page <= int(data["endpage"]):
print "Parsing page {0}".format(page)
else:
if page >= int(data["startpage"]) and \
page <= int(data["endpage"]):
parts = l.split("\t")
for column, p in enumerate(parts):
match_number = re.match("(\d{1,3})\. ?", p)
if match_number:
counterparts[int(match_number.group(1))] = \
(p, len(match_number.group(0)), page,
column, pos_on_page)
pos_on_page += 1
# store concepts
if data["language_name"] == u"English":
for k, v in counterparts.items():
concept = v[0][v[1]:].upper()
if k == 232:
concept = "MAT (TABARME)"
elif k == 233:
concept = "MAT (ZANA)"
concept = re.sub(u" ", u"_", concept)
concept = re.sub(u"'", u"_", concept)
concept = re.sub(u"\(", u"", concept)
concept = re.sub(u"\)", u"", concept)
concept = re.sub(u"\?", u"", concept)
concept_id = u"{0}".format(concept)
concepts[k] = concept_id
# add concept to db if it is not there
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(
concept=concept_id).first()
if concept_db == None:
concept_db = model.WordlistConcept()
concept_db.concept = concept_id
Session.add(concept_db)
Session.commit()
entry = {}
for k, v in counterparts.items():
entry_db = importfunctions.process_line(v[0], "wordlist")
entry_db.wordlistdata = wordlistdata
entry_db.pos_on_page = v[4]
entry_db.startpage = v[2]
entry_db.endpage = v[2]
entry_db.startcolumn = v[3]
entry_db.endcolumn = v[3]
entry_db.volume = volume
concept_id = concepts[k]
concept_db = model.meta.Session.query(
model.WordlistConcept).filter_by(concept=concept_id).first()
if concept_db is None:
print("Concept not found: {0}".format(concept_id))
entry_db.concept = concept_db
s = v[1]
e = len(v[0])
inserted = insert_counterpart(entry_db, s, e, data)
Session.commit()
Session.commit()
Session.close()