This article collects typical usage examples of the Python method models.Person.buildMap: what exactly does it do, and how is it called in practice? The curated code samples below may help answer that, and from there you can explore further uses of its containing class, models.Person.
Three code examples of Person.buildMap are shown below, sorted by popularity by default.
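Before the examples, a minimal sketch of the shared call pattern may help orient you. The sketch is an assumption for illustration only (the id and field name are hypothetical); what the examples themselves confirm is that buildMap converts a Person entity into a plain dict that can be iterated and JSON-serialized:

from models import Person

# Minimal usage sketch (assumed, not taken from the project itself).
person = Person.get_by_id('someid')    # look up an entity by its id
if person:
    record = Person.buildMap(person)   # plain dict, e.g. {'name': ..., ...}
    print(record.get('name'))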
Example 1: stats_map
# Required import: from models import Person [as alias]
# Or: from models.Person import buildMap [as alias]
# Also assumes: import logging, plus the project-level stats_attributes
# whitelist and the process_string helper (neither shown on this page).
def stats_map(data):
    d = Person.buildMap(data)
    logging.info("MAP: Got %s", str(d))
    for k, v in d.items():
        # Skip empty pairs and attributes outside the stats whitelist.
        if not k or not v or k not in stats_attributes:
            continue
        v = v.replace("'", '').replace('"', '')
        logging.info("MAP GLOBAL: " + str(k) + ' --> ' + str(v))
        yield ('global:' + k), {v: 1}
        r = {k: {}}
        for k2, v2 in d.items():
            if not k2 or not v2 or k2 not in stats_attributes:
                continue
            v2 = v2.replace("'", '').replace('"', '')
            # Ex: First name = Dan, Major = CS.
            # For the string 'Dan', when it is used as a first name,
            # count _x_ CS majors.
            r[k][k2] = {v2: 1}
        s = str(v)
        logging.info('MAP FINAL: ' + s + ' --> ' + str(r))
        yield s, r
        # Emit the same record under each variant that process_string
        # derives from s.
        l = process_string(s)
        if len(l) > 1:
            for i in l:
                yield i, r
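The mapper emits two shapes of (key, value) pairs: flat counts under 'global:&lt;attr&gt;' keys, and nested counts under per-string keys (e.g. 'dan' -> {'first_name': {'major': {'cs': 1}}}). The original reduce step is not shown on this page; the following hypothetical companion reducer is sketched only to show how those emitted shapes would combine:

def merge_counts(a, b):
    # Recursively merge nested count dicts; integer leaves are summed,
    # so two {'cs': 1} values for the same path combine into {'cs': 2}.
    for key, val in b.items():
        if isinstance(val, dict):
            merge_counts(a.setdefault(key, {}), val)
        else:
            a[key] = a.get(key, 0) + val
    return a

def stats_reduce(key, values):
    # Hypothetical reducer, not from the source project.
    result = {}
    for v in values:
        merge_counts(result, v)
    yield key, result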
Example 2: get
# Required import: from models import Person [as alias]
# Or: from models.Person import buildMap [as alias]
# Also assumes: import cgi, json, logging, urllib, and
# from google.appengine.api import mail, memcache, search;
# parse_int is a project helper (parse an int with a default fallback).
def get(self):  # GET handler on a request handler class (class not shown)
    self.response.headers['Content-Type'] = 'text/plain'
    # Unescape, lowercase, and length-limit the incoming parameters.
    search_query = str(urllib.unquote(cgi.escape(self.request.get('q')).lower()[:100]))
    name = str(urllib.unquote(cgi.escape(self.request.get('name')).lower()[:50]))
    token = str(urllib.unquote(cgi.escape(self.request.get('token'))))
    page_num = parse_int(urllib.unquote(cgi.escape(self.request.get('page_num'))), 1)
    page_size = parse_int(urllib.unquote(cgi.escape(self.request.get('page_size'))), 20)
    if search_query + name == "":
        # No query at all: return an empty result set.
        d = {}
        d['data'] = []
        d['token'] = token
        d['q'] = ""
        s = json.dumps(d)
        self.response.out.write(s)
        return
    if search_query == "":
        search_query = name
    if page_size > _PAGE_SIZE or page_size < 1:
        page_size = _PAGE_SIZE
    # Flood prevention: count hits per client IP in memcache.
    ip = str(self.request.remote_addr)
    ipCount = memcache.get(ip)
    if ipCount is not None:
        if ipCount > 1000:
            d = {}
            d['data'] = 'Quota Exceeded'
            d['token'] = token
            d['q'] = search_query
            s = json.dumps(d)
            self.response.out.write(s)
            # Exponential ban, capped at one week.
            ban_time = 600 + 60 * 2 ** (ipCount - 1000)
            if ban_time > 7 * 24 * 60 * 60:
                ban_time = 7 * 24 * 60 * 60
            logging.info('Quota exceeded for ' + ip + ', count at ' + str(ipCount) + ', banned for ' + str(ban_time))
            memcache.replace(ip, ipCount + 1, time=ban_time)
            # Email an admin on every 100th hit past the quota.
            if (ipCount - 1001) % 100 == 0:
                message = mail.EmailMessage(sender="IP Banning <[email protected]>",
                                            subject="RPIDirectory IP " + ip + " Banned")
                message.to = "[email protected]"
                message.body = "IP: " + ip + "\nban time: " + str(ban_time) + "\nQuery: " + search_query + "\nHit Count: " + str(ipCount)
                message.send()
                logging.info("EMail sent about ip: " + ip)
            return
        memcache.replace(ip, ipCount + 1, time=600)
    else:
        memcache.add(ip, 1, time=600)
    queries = sorted(map(str, search_query.split()))
    query_string = ' AND '.join(queries)
    d = {}
    d["data"] = []
    d["token"] = token
    d["q"] = search_query
    # Serve cached results when the same query string has been seen before.
    data = memcache.get(query_string)
    if not data:
        data = []
        # Sort results by first name, descending.
        expr_list = [search.SortExpression(
            expression='first_name', default_value='',
            direction=search.SortExpression.DESCENDING)]
        # Construct the sort options.
        sort_opts = search.SortOptions(expressions=expr_list)
        offset_num = (page_num - 1) * page_size
        query_options = search.QueryOptions(limit=page_size, offset=offset_num,
                                            ids_only=True, sort_options=sort_opts)
        results = search.Index(name=_INDEX_NAME).search(query=search.Query(
            query_string=query_string, options=query_options))
        for result in results:
            rcsid = result.doc_id
            r = Person.get_by_id(rcsid)
            if r:
                data.append(Person.buildMap(r))
        # Cache the result list for 28 days.
        memcache.add(query_string, data, time=2419200)
    d["data"] = data
    s = json.dumps(d)
    self.response.out.write(s)
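The flood-prevention branch in Example 2 deserves a note: once an IP exceeds 1000 hits within the 600-second memcache window, each further hit doubles the ban via ban_time = 600 + 60 * 2 ** (ipCount - 1000), clamped to one week. A standalone check with the same constants shows the clamp takes over on the 14th hit past the quota:

# Standalone check of the ban-time formula from Example 2 (same constants).
WEEK = 7 * 24 * 60 * 60  # 604800 seconds

for ip_count in (1001, 1005, 1010, 1013, 1014):
    ban_time = min(600 + 60 * 2 ** (ip_count - 1000), WEEK)
    print(ip_count, ban_time)
# 1013 -> 492120 (still under a week); 1014 -> 983640 raw, clamped to 604800.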
Example 3: get
# Required import: from models import Person [as alias]
# Or: from models.Person import buildMap [as alias]
# Also assumes: import cgi, json, string, urllib, and
# from google.appengine.api import search; parse_int as in Example 2.
def get(self):  # GET handler on a request handler class (class not shown)
    self.response.headers['Content-Type'] = 'text/plain'
    search_query = str(urllib.unquote(cgi.escape(self.request.get('q')).lower()[:100]))
    name = str(urllib.unquote(cgi.escape(self.request.get('name')).lower()[:50]))
    token = str(urllib.unquote(cgi.escape(self.request.get('token'))))
    page_num = parse_int(urllib.unquote(cgi.escape(self.request.get('page_num'))), 1)
    page_size = parse_int(urllib.unquote(cgi.escape(self.request.get('page_size'))), 20)
    # Whitelist the query down to letters, digits, and spaces.
    # (string.letters is Python 2; use string.ascii_letters on Python 3.)
    s = ''
    for c in search_query:
        if c in string.letters or c in string.digits or c == ' ':
            s += c
    search_query = s
    if search_query + name == "":
        d = {}
        d['data'] = []
        d['token'] = token
        d['q'] = ""
        s = json.dumps(d)
        self.response.out.write(s)
        return
    if search_query == "":
        search_query = name
    if page_size > _PAGE_SIZE or page_size < 1:
        page_size = _PAGE_SIZE
    queries = sorted(map(str, search_query.split()))
    query_string = ' AND '.join(queries)
    d = {}
    d["data"] = []
    d["token"] = token
    d["q"] = search_query
    data = []
    # Sort results by first name, descending.
    expr_list = [search.SortExpression(
        expression='first_name', default_value='',
        direction=search.SortExpression.DESCENDING)]
    # Construct the sort options.
    sort_opts = search.SortOptions(expressions=expr_list)
    offset_num = (page_num - 1) * page_size
    query_options = search.QueryOptions(limit=page_size, offset=offset_num,
                                        ids_only=True, sort_options=sort_opts)
    results = search.Index(name=_INDEX_NAME).search(query=search.Query(
        query_string=query_string, options=query_options))
    for result in results:
        rcsid = result.doc_id
        r = Person.get_by_id(rcsid)
        if r:
            per = Person.buildMap(r)
            # Title-case the name for display.
            per['name'] = per['name'].title()
            data.append(per)
    d["data"] = data
    s = json.dumps(d)
    self.response.out.write(s)
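Compared with Example 2, this handler adds two things, character whitelisting and title-cased names, and drops two, the memcache result cache and the flood prevention. As a side note, the character loop can be expressed as a single pass with a set lookup; a minimal equivalent sketch (string.letters is Python 2; substitute string.ascii_letters on Python 3):

import string

ALLOWED = set(string.letters + string.digits + ' ')

def clean_query(q):
    # Equivalent to the character-filter loop in Example 3.
    return ''.join(c for c in q if c in ALLOWED)

print(clean_query("o'brien & co."))  # -> 'obrien  co'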