本文整理汇总了Python中philologic.DB.DB类的典型用法代码示例。如果您正苦于以下问题:Python DB类的具体用法?Python DB怎么用?Python DB使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DB类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_metadata_token_count
def get_metadata_token_count(environ, start_response):
    """WSGI endpoint: normalize posted absolute counts to counts per million words.

    Reads a JSON mapping of label -> {"count": int, "metadata": {...}} from the
    request body. For each label, queries the database for all objects matching
    that label's metadata, sums their word counts, and rewrites the label's
    "count" as a frequency per million words (rounded to 3 decimals).
    Yields the updated mapping serialized as JSON.
    """
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)  # kept for its request-parsing side effects
    frequencies = json.loads(environ['wsgi.input'].read())
    # BUG FIX: dict.iteritems() is Python-2-only; items() works on both 2 and 3.
    for label, m in frequencies.items():
        query_metadata = {}
        for metadata in m['metadata']:
            query_metadata[metadata] = m['metadata'][metadata].encode('utf-8')
        hits = db.query(**query_metadata)
        total_count = sum(int(hit['word_count']) for hit in hits)
        try:
            frequencies[label]['count'] = round(float(m['count']) / total_count * 1000000, 3)
        except (KeyError, TypeError, ValueError, ZeroDivisionError):
            # BUG FIX: the original bare `except:` swallowed everything, including
            # KeyboardInterrupt/SystemExit; only the expected arithmetic/lookup
            # failures (missing 'count', non-numeric count, total_count == 0)
            # should fall back to 0.
            frequencies[label]['count'] = 0
    yield json.dumps(frequencies)
示例2: term_group
def term_group(environ, start_response):
    """WSGI endpoint: report how the submitted query parses into term groups.

    Yields a UTF-8 encoded JSON object with the original query string and a
    list of human-readable term groups ('|' for OR, ' NOT ' prefix, terms and
    quoted phrases space-separated).
    """
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = WebConfig(os.path.abspath(os.path.dirname(__file__)).replace('scripts', ''))
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(environ, config)
    if not request["q"]:
        # Empty query: nothing to parse.
        dump = json.dumps({"original_query": "", "term_groups": []})
    else:
        # The query is still executed for its side effects, as before.
        db.query(request["q"], request["method"], request["arg"],
                 sort_order=request["sort_order"], **request.metadata)
        groups = split_terms(group_terms(parse_query(request.q)))
        term_groups = []
        for grouping in groups:
            pieces = []
            seen_not = False
            for kind, term in grouping:
                if kind == 'NOT':
                    # Emit the NOT marker only once per group.
                    if not seen_not:
                        seen_not = True
                        pieces.append(' NOT ')
                elif kind == 'OR':
                    pieces.append('|')
                elif kind in ("TERM", "QUOTE"):
                    pieces.append(' %s ' % term)
            term_groups.append(''.join(pieces).strip())
        dump = json.dumps({"term_groups": term_groups, "original_query": request.original_q})
    yield dump.encode('utf8')
示例3: lookup_word_service
def lookup_word_service(environ, start_response):
    """WSGI endpoint: look up the selected word and yield the result as JSON.

    Dispatches on ``request.report``: "concordance" computes a byte window
    around a concordance hit; "navigation" uses the byte span of the text
    object identified by ``request.philo_id``.
    """
    status = "200 OK"
    headers = [("Content-type", "application/json; charset=UTF-8"), ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    config = f.WebConfig()
    db = DB(config.db_path + "/data/")
    request = WSGIHandler(db, environ)
    cursor = db.dbh.cursor()
    if request.report == "concordance":
        hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
        # Context window is triple the configured concordance length on each side.
        context_size = config["concordance_length"] * 3
        hit = hits[int(request.position)]
        bytes = hit.bytes  # NOTE(review): local shadows the builtin `bytes`
        hit_span = hit.bytes[-1] - hit.bytes[0]
        length = context_size + hit_span + context_size
        bytes, byte_start = adjust_bytes(bytes, length)
        byte_end = byte_start + length
        filename = hit.filename
        token = request.selected
    elif request.report == "navigation":
        token = request.selected
        philo_id = request.philo_id.split(" ")
        text_obj = db[philo_id]
        byte_start, byte_end = int(text_obj.byte_start), int(text_obj.byte_end)
        filename = text_obj.filename
        # print >> sys.stderr, "WORD LOOKUP FROM NAVIGATION", request.philo_id,request.selected, byte_start, byte_end, filename
    else:
        # NOTE(review): any other report value leaves token/byte_start/byte_end/
        # filename unbound, so the yield below raises NameError — confirm all
        # callers only ever send "concordance" or "navigation".
        pass
    # print >> sys.stderr, "TOKEN", token, "BYTES: ", byte_start, byte_end, "FILENAME: ", filename, "POSITION", request.position
    token_n = 0  # always look up the first occurrence within the span
    yield lookup_word(db, cursor, token, token_n, byte_start, byte_end, filename)
示例4: term_group
def term_group(environ, start_response):
    """WSGI endpoint: yield the parsed term groups of a query as a JSON list.

    Each group is rendered as a display string: '|' joins OR alternatives,
    ' NOT ' (emitted once) prefixes negated terms, and plain terms or quoted
    phrases are space-separated.
    """
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    # The query is still executed for its side effects, as before.
    db.query(request["q"], request["method"], request["arg"], **request.metadata)
    groups = split_terms(group_terms(parse_query(request.q)))
    term_groups = []
    for grouping in groups:
        pieces = []
        seen_not = False
        for kind, term in grouping:
            if kind == 'NOT':
                # Only one NOT marker per group.
                if not seen_not:
                    seen_not = True
                    pieces.append(' NOT ')
            elif kind == 'OR':
                pieces.append('|')
            elif kind in ("TERM", "QUOTE"):
                pieces.append(' %s ' % term)
        term_groups.append(''.join(pieces).strip())
    yield json.dumps(term_groups)
示例5: lookup_word_service
def lookup_word_service(environ, start_response):
    """WSGI endpoint: look up the selected word and yield the result as JSON.

    Dispatches on ``request.report``: "concordance" computes a byte window
    around a concordance hit; "navigation" uses the byte span of the text
    object identified by ``request.philo_id``.
    """
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'), ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    config = WebConfig(os.path.abspath(os.path.dirname(__file__)).replace('scripts', ''))
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(environ, config)
    cursor = db.dbh.cursor()
    if request.report == "concordance":
        hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
        # Context window is triple the configured concordance length on each side.
        context_size = config['concordance_length'] * 3
        hit = hits[int(request.position)]
        bytes = hit.bytes  # NOTE(review): local shadows the builtin `bytes`
        hit_span = hit.bytes[-1] - hit.bytes[0]
        length = context_size + hit_span + context_size
        bytes, start_byte = adjust_bytes(bytes, length)
        end_byte = start_byte + length
        filename = hit.filename
        token = request.selected
    elif request.report == "navigation":
        token = request.selected
        philo_id = request.philo_id.split(" ")
        text_obj = db[philo_id]
        start_byte, end_byte = int(text_obj.start_byte), int(text_obj.end_byte)
        filename = text_obj.filename
        # print >> sys.stderr, "WORD LOOKUP FROM NAVIGATION", request.philo_id,request.selected, start_byte, end_byte, filename
    else:
        # NOTE(review): any other report value leaves token/start_byte/end_byte/
        # filename unbound, so the yield below raises NameError — confirm all
        # callers only ever send "concordance" or "navigation".
        pass
    # print >> sys.stderr, "TOKEN", token, "BYTES: ", start_byte, end_byte, "FILENAME: ", filename, "POSITION", request.position
    token_n = 0  # always look up the first occurrence within the span
    yield lookup_word(db, cursor, token, token_n, start_byte, end_byte, filename)
示例6: landing_page_bibliography
def landing_page_bibliography(request, config):
    """Build bibliography entries for the landing page.

    Fetches every object at ``request.object_level`` (falling back to the
    database's default object level) and, for each one, collects its metadata
    fields plus the first and last non-empty 'head' found between the
    document's row and the next document's row in the toms table.

    Returns a list of dicts with the metadata fields, 'philo_id',
    'start_head' and 'end_head'.
    """
    db = DB(config.db_path + '/data/')
    object_level = request.object_level
    if object_level and object_level in ["doc", "div1", "div2", "div3"]:
        hits = db.get_all(object_level)
    else:
        hits = db.get_all(db.locals['default_object_level'])
    results = []
    c = db.dbh.cursor()
    for hit in hits:
        hit_object = {}
        for field in db.locals['metadata_fields']:
            hit_object[field] = hit[field] or ''
        if object_level == "doc":
            # Docs are identified by the first component alone.
            hit_object['philo_id'] = hit.philo_id[0]
        else:
            hit_object['philo_id'] = '/'.join([str(i) for i in hit.philo_id])
        # Row span of this document in toms: its own id up to the next doc's id.
        doc_id = str(hit.philo_id[0]) + ' 0 0 0 0 0 0'
        next_doc_id = str(hit.philo_id[0] + 1) + ' 0 0 0 0 0 0'
        # NOTE(review): philo_id values are string-interpolated into SQL; they
        # come from the database itself, but parameterized queries (as used in
        # the fallback below) would still be safer.
        c.execute('select rowid from toms where philo_id="%s"' % doc_id)
        doc_row = c.fetchone()['rowid']
        c.execute('select rowid from toms where philo_id="%s"' % next_doc_id)
        try:
            next_doc_row = c.fetchone()['rowid']
        except TypeError:  # if this is the last doc, just get the last rowid in the table.
            c.execute('select max(rowid) from toms;')
            next_doc_row = c.fetchone()[0]
        # First non-empty head within the document's row span.
        try:
            c.execute(
                'select * from toms where rowid between %d and %d and head is not null and head !="" limit 1'
                % (doc_row, next_doc_row))
        except sqlite3.OperationalError:  # no type field in DB
            c.execute(
                'select * from toms where rowid between ? and ? and head is not null and head !="" limit 1',
                (doc_row, next_doc_row))
        try:
            start_head = c.fetchone()['head'].decode('utf-8')
            # Normalize casing; decode/encode round-trip implies bytestring heads
            # (Python 2 style) — TODO confirm against the loader.
            start_head = start_head.lower().title().encode('utf-8')
        except Exception as e:
            print(repr(e), file=sys.stderr)
            start_head = ''
        # Last non-empty head within the document's row span.
        try:
            c.execute(
                'select head from toms where rowid between %d and %d and head is not null and head !="" order by rowid desc limit 1'
                % (doc_row, next_doc_row))
        except sqlite3.OperationalError:  # no type field in DB
            # NOTE(review): this fallback is byte-identical to the statement in
            # the try above, so it cannot succeed where the first attempt failed —
            # confirm whether a parameterized variant (as used for start_head)
            # was intended here.
            c.execute(
                'select head from toms where rowid between %d and %d and head is not null and head !="" order by rowid desc limit 1'
                % (doc_row, next_doc_row))
        try:
            end_head = c.fetchone()['head']
            end_head = end_head.decode('utf-8').lower().title().encode('utf-8')
        except:
            # No row or undecodable head: fall back to empty string.
            end_head = ''
        hit_object['start_head'] = start_head
        hit_object['end_head'] = end_head
        results.append(hit_object)
    return results
示例7: bibliography_results
def bibliography_results(request, config):
    """Assemble the bibliography report payload for a search request.

    Runs either a full collection fetch (no metadata) or a metadata query,
    pages the hitlist, and builds one citation entry per hit. Returns a
    ``(bibliography_object, hits)`` tuple, where bibliography_object carries
    the paging description, the query echo, and the per-hit results.
    """
    db = DB(config.db_path + '/data/')
    if request.no_metadata:
        hits = db.get_all(db.locals['default_object_level'], request["sort_order"])
    else:
        hits = db.query(sort_order=request["sort_order"], **request.metadata)
    # The simple landing page report wants the entire bibliography in load order.
    if request.simple_bibliography == "all":
        hits.finish()
        first = 1
        last = len(hits)
        page_count = last
    else:
        first, last, page_count = page_interval(request.results_per_page, hits,
                                                request.start, request.end)
    bibliography_object = {
        "description": {
            "start": first,
            "end": last,
            "n": page_count,
            "results_per_page": request.results_per_page
        },
        "query": dict(pair for pair in request),
        "default_object": db.locals['default_object_level']
    }
    entries = []
    result_type = "doc"
    for hit in hits[first - 1:last]:
        hrefs = citation_links(db, config, hit)
        fields = {name: hit[name] for name in db.locals['metadata_fields']}
        result_type = hit.object_type
        if request.simple_bibliography == "all":
            citation = citations(hit, hrefs, config, report="simple_landing")
        else:
            citation = citations(hit, hrefs, config, report="bibliography",
                                 result_type=result_type)
        if config.dictionary_bibliography is False or result_type == "doc":
            entries.append({
                'citation': citation,
                'citation_links': hrefs,
                'philo_id': hit.philo_id,
                "metadata_fields": fields,
                "object_type": result_type
            })
        else:
            # Dictionary bibliographies include the object's text as context.
            context = get_text_obj(hit, config, request, db.locals["token_regex"], images=False)
            entries.append({
                'citation': citation,
                'citation_links': hrefs,
                'philo_id': hit.philo_id,
                "metadata_fields": fields,
                "context": context,
                "object_type": result_type
            })
    bibliography_object["results"] = entries
    bibliography_object['results_length'] = len(hits)
    bibliography_object['query_done'] = hits.done
    bibliography_object['result_type'] = result_type
    return bibliography_object, hits
示例8: get_frequency
def get_frequency(environ, start_response):
    """WSGI endpoint: yield the word-frequency breakdown of a query as JSON."""
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    _field, frequency_table = generate_word_frequency(hits, request, db, config)
    yield json.dumps(frequency_table, indent=2)
示例9: collocation
def collocation(environ, start_response):
    """WSGI endpoint: run a cooccurrence query and yield the collocation table as JSON."""
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], "cooc", request["arg"], **request.metadata)
    hits.finish()  # collocation needs the complete hitlist before counting
    yield json.dumps(fetch_collocation(hits, request, db, config))
示例10: word_property_filter
def word_property_filter(environ, start_response):
    """WSGI endpoint: filter query hits by a word property and yield the result as JSON."""
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    yield json.dumps(filter_words_by_property(hits, config.db_path, request, db, config))
示例11: term_list
def term_list(environ, start_response):
    """WSGI endpoint: yield the first expanded form of the submitted query terms."""
    headers = [("Content-type", "application/json; charset=UTF-8"),
               ("Access-Control-Allow-Origin", "*")]
    start_response("200 OK", headers)
    config = f.WebConfig()
    db = DB(config.db_path + "/data/")
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    yield json.dumps(get_expanded_query(hits)[0])
示例12: get_more_context
def get_more_context(environ, start_response):
    """WSGI endpoint: yield an enlarged concordance context for one hit as JSON."""
    start_response("200 OK", [("Content-type", "application/json; charset=UTF-8"),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + "/data/")
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    # Triple the normal concordance window for the "more context" view.
    context_size = config["concordance_length"] * 3
    hit_context = r.fetch_concordance(db, hits[int(request.hit_num)],
                                      config.db_path, context_size)
    yield json.dumps(hit_context)
示例13: generate_word_frequency
def generate_word_frequency(request, config):
    """Read through a hitlist and build a word-frequency table.

    Looks up request["field"] on each hit and accumulates unique values with
    their frequencies. Processing is time-boxed: after roughly 5 seconds the
    loop stops and 'more_results' signals the client to ask for another batch
    starting at 'hits_done'. Returns a frequency_object dict with 'results'
    (value -> {'count', 'url'}), 'hits_done', 'more_results',
    'results_length', and an echo of the 'query'.
    """
    db = DB(config.db_path + '/data/')
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    field = request["field"]
    counts = {}
    frequency_object = {}
    start_time = timeit.default_timer()
    last_hit_done = request.start
    try:
        for n in hits[request.start:]:
            key = get_word_attrib(n, field, db)
            if not key:
                # NULL is a magic value for queries, don't change it
                # recklessly.
                key = "NULL"
            counts[key] = counts.get(key, 0) + 1
            elapsed = timeit.default_timer() - start_time
            last_hit_done += 1
            if elapsed > 5:
                # Time budget exhausted; the client fetches the rest later.
                break
        table = {}
        # BUG FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
        for k, v in counts.items():
            url = make_absolute_query_link(config,
                                           request,
                                           start="0",
                                           end="0",
                                           report="word_property_filter",
                                           word_property=field,
                                           word_property_value=k)
            table[k] = {'count': v, 'url': url}
        frequency_object['results'] = table
        frequency_object["hits_done"] = last_hit_done
        # More results remain iff the loop broke before exhausting the hitlist.
        frequency_object['more_results'] = last_hit_done != len(hits)
    except IndexError:
        # request.start past the end of the hitlist: empty result set.
        frequency_object['results'] = {}
        frequency_object['more_results'] = False
    frequency_object['results_length'] = len(hits)
    frequency_object['query'] = dict([i for i in request])
    return frequency_object
示例14: term_list
def term_list(environ, start_response):
    """WSGI endpoint: yield the first group of expanded query terms as JSON."""
    start_response('200 OK', [('Content-type', 'application/json; charset=UTF-8'),
                              ("Access-Control-Allow-Origin", "*")])
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    hits = db.query(request["q"], request["method"], request["arg"], **request.metadata)
    hits.finish()  # ensure the full hitlist is available before expansion
    yield json.dumps(get_expanded_query(hits)[0])
示例15: get_bibliography
def get_bibliography(environ,start_response):
    """WSGI endpoint: stream landing-page bibliography entries as JSON.

    Fetches every object at request.object_level (falling back to the
    database's default object level) and, for each one, collects its metadata
    fields plus the first and last non-null 'head' found between the
    document's row and the next document's row in the toms table.
    """
    status = '200 OK'
    headers = [('Content-type', 'application/json; charset=UTF-8'),("Access-Control-Allow-Origin","*")]
    start_response(status,headers)
    config = f.WebConfig()
    db = DB(config.db_path + '/data/')
    request = WSGIHandler(db, environ)
    object_level = request.object_level
    # NOTE(review): `object_levels` is not defined in this view of the file —
    # presumably a module-level whitelist like ["doc", "div1", ...]; confirm.
    if object_level and object_level in object_levels:
        hits = db.get_all(object_level)
    else:
        hits = db.get_all(db.locals['default_object_level'])
    results = []
    c = db.dbh.cursor()
    for hit in hits:
        hit_object = {}
        for field in db.locals['metadata_fields']:
            hit_object[field] = hit[field] or ''
        if object_level == "doc":
            # Docs are identified by the first component alone.
            hit_object['philo_id'] = hit.philo_id[0]
        else:
            hit_object['philo_id'] = '/'.join([str(i) for i in hit.philo_id])
        # Row span of this document in toms: its own id up to the next doc's id.
        doc_id = str(hit.philo_id[0]) + ' 0 0 0 0 0 0'
        next_doc_id = str(hit.philo_id[0] + 1) + ' 0 0 0 0 0 0'
        # NOTE(review): philo_id values are string-interpolated into SQL; they
        # come from the database itself, but parameterized queries would be safer.
        c.execute('select rowid from toms where philo_id="%s"' % doc_id)
        doc_row = c.fetchone()['rowid']
        c.execute('select rowid from toms where philo_id="%s"' % next_doc_id)
        try:
            next_doc_row = c.fetchone()['rowid']
        except TypeError: # if this is the last doc, just get the last rowid in the table.
            c.execute('select max(rowid) from toms;')
            next_doc_row = c.fetchone()[0]
        # First non-null head in the document's row span.
        c.execute('select head from toms where rowid between %d and %d and head is not null limit 1' % (doc_row, next_doc_row))
        try:
            start_head = c.fetchone()['head']
            # Normalize casing; decode/encode round-trip implies bytestring heads
            # (Python 2 style) — TODO confirm against the loader.
            start_head = start_head.decode('utf-8').lower().title().encode('utf-8')
        except:
            # No row or undecodable head: fall back to empty string.
            start_head = ''
        # Last non-null head in the document's row span.
        c.execute('select head from toms where rowid between %d and %d and head is not null order by rowid desc limit 1' % (doc_row, next_doc_row))
        try:
            end_head = c.fetchone()['head']
            end_head = end_head.decode('utf-8').lower().title().encode('utf-8')
        except:
            end_head = ''
        hit_object['start_head'] = start_head
        hit_object['end_head'] = end_head
        results.append(hit_object)
    yield json.dumps(results)