This article collects typical usage examples of the Python function user_portrait.global_utils.es_user_portrait.mget. If you have been wondering what the mget function does, how to call it, or what real-world uses look like, the curated code examples below should help.
Fifteen code examples of the mget function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
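Before the project examples, here is a minimal, self-contained sketch of the call pattern they all rely on. It assumes an elasticsearch-py 1.x/2.x client (where doc_type is still supported), a local node, and an existing user_portrait index with doc type user; the host address, the ids in uid_list, and the uname field are placeholders for illustration, not part of the original project.

# Minimal sketch of an mget round trip (assumed setup, not the project's own config).
from elasticsearch import Elasticsearch

es = Elasticsearch(['127.0.0.1:9200'])      # hypothetical host
uid_list = ['1234567890', '2345678901']     # hypothetical user ids

# mget returns one entry per requested id under 'docs'; ids that are not in the
# index come back with found == False, so check 'found' before reading '_source'.
docs = es.mget(index='user_portrait', doc_type='user', body={'ids': uid_list})['docs']
for doc in docs:
    if doc['found']:
        print(doc['_id'], doc['_source'].get('uname'))
    else:
        print(doc['_id'], 'not in user_portrait')

The examples below follow exactly this pattern: build an id list, issue one mget, then walk the docs array and branch on found (or wrap the access in try/except).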
Example 1: show_out_uid
def show_out_uid(fields):
    out_list = []
    recommend_dict = r_out.hgetall("recommend_delete_list")
    recommend_keys = recommend_dict.keys()
    for iter_key in recommend_keys:
        out_list.extend(json.loads(r_out.hget("recommend_delete_list", iter_key)))
    if not out_list:
        return out_list  # no one is recommended to out
    return_list = []
    detail = es.mget(index="user_portrait", doc_type="user", body={"ids": out_list}, _source=True)['docs']
    # extract the return dict with the field '_source'
    filter_uid = all_delete_uid()
    for i in range(len(out_list)):
        if detail[i]['_source']['uid'] in filter_uid:
            continue
        detail_info = []
        for item in fields:
            if item == "topic":
                detail_info.append(','.join(detail[i]['_source']['topic_string'].split("&")))
            else:
                detail_info.append(detail[i]['_source'][item])
        return_list.append(detail_info)
    return return_list
Example 2: get_group_list
def get_group_list(task_name):
    results = []
    try:
        es_results = es.get(index=index_name, doc_type=index_type, id=task_name)['_source']
    except:
        return results
    #print 'es_result:', es_results['uid_list'], type(es_results['uid_list'])
    uid_list = es_results['uid_list']
    user_portrait_attribute = es.mget(index='user_portrait', doc_type='user', body={'ids': uid_list})['docs']
    evaluate_max = get_evaluate_max()
    for item in user_portrait_attribute:
        uid = item['_id']
        try:
            source = item['_source']
            uname = source['uname']
            gender = source['gender']
            location = source['location']
            importance = source['importance']
            normal_importance = math.log(importance / evaluate_max['importance'] * 9 + 1, 10) * 100
            influence = source['influence']
            normal_influence = math.log(influence / evaluate_max['influence'] * 9 + 1, 10) * 100
            results.append([uid, uname, gender, location, normal_importance, normal_influence])
        except:
            results.append([uid])
    return results
Example 3: ajax_get_group_detail
def ajax_get_group_detail():
    task_name = request.args.get('task_name', '')  # task_name
    user = request.args.get('user', '')
    _id = user + '-' + task_name
    portrait_detail = []
    top_activeness = get_top_influence("activeness")
    top_influence = get_top_influence("influence")
    top_importance = get_top_influence("importance")
    search_result = es.get(index=index_group_manage, doc_type=doc_type_group, id=_id).get('_source', {})
    if search_result:
        try:
            uid_list = json.loads(search_result['uid_list'])
        except:
            uid_list = search_result['uid_list']
        if uid_list:
            search_results = es.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": uid_list}, fields=SOCIAL_SENSOR_INFO)['docs']
            for item in search_results:
                temp = []
                if item['found']:
                    for iter_item in SOCIAL_SENSOR_INFO:
                        if iter_item == "topic_string":
                            temp.append(item["fields"][iter_item][0].split('&'))
                        elif iter_item == "activeness":
                            temp.append(math.ceil(item["fields"][iter_item][0] / float(top_activeness) * 100))
                        elif iter_item == "importance":
                            temp.append(math.ceil(item["fields"][iter_item][0] / float(top_importance) * 100))
                        elif iter_item == "influence":
                            temp.append(math.ceil(item["fields"][iter_item][0] / float(top_influence) * 100))
                        else:
                            temp.append(item["fields"][iter_item][0])
                    portrait_detail.append(temp)
    return json.dumps(portrait_detail)
Example 4: get_vary_detail_info
def get_vary_detail_info(vary_detail_dict, uid_list):
    results = {}
    # get uname
    try:
        user_portrait_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, \
                body={'ids': uid_list})['docs']
    except:
        user_portrait_result = []
    uname_dict = {}
    for portrait_item in user_portrait_result:
        uid = portrait_item['_id']
        if portrait_item['found'] == True:
            uname = portrait_item['_source']['uname']
            uname_dict[uid] = uname
        else:
            uname_dict[uid] = uid
    # get new vary detail information
    for vary_pattern in vary_detail_dict:
        user_info_list = vary_detail_dict[vary_pattern]
        new_pattern_list = []
        for user_item in user_info_list:
            uid = user_item[0]
            uname = uname_dict[uid]
            start_date = ts2datetime(int(user_item[1]))
            end_date = ts2datetime(int(user_item[2]))
            new_pattern_list.append([uid, uname, start_date, end_date])
        results[vary_pattern] = new_pattern_list
    return results
Example 5: query_vary_top_k
def query_vary_top_k(index_name, doctype, top_k, sort_index="vary"):
    query_body = {
        "query": {
            "match_all": {}
        },
        "size": top_k,
        "sort": [{sort_index: {"order": "desc"}}]
    }
    result = es.search(index=index_name, doc_type=doctype, body=query_body)['hits']['hits']
    uid_list = []
    for item in result:
        uid_list.append(item['_id'])
    portrait_result = es_portrait.mget(index="user_portrait", doc_type="user", body={"ids": uid_list}, _source=True)['docs']
    profile_result = es_profile.mget(index="weibo_user", doc_type="user", body={"ids": uid_list}, _source=True)['docs']
    return_list = []
    rank = 1
    for i in range(len(result)):
        info = ['', '', '', '', '']
        info[0] = rank
        if profile_result[i]['found']:
            info[1] = profile_result[i]['_source'].get('photo_url', '')
            info[3] = profile_result[i]['_source'].get('nick_name', '')
        info[2] = result[i].get('_id', '')
        info[4] = result[i]['_source']['vary']
        if portrait_result[i]['found']:
            info.append('1')
        else:
            info.append('0')
        return_list.append(info)
        rank += 1
    return return_list
Example 6: search_group_sentiment_weibo
def search_group_sentiment_weibo(task_name, start_ts, sentiment):
    weibo_list = []
    # step1: get task_name uid
    try:
        group_result = es_group_result.get(index=group_index_name, doc_type=group_index_type, \
                id=task_name, _source=False, fields=['uid_list'])
    except:
        group_result = {}
    if group_result == {}:
        return 'task name invalid'
    try:
        uid_list = group_result['fields']['uid_list']
    except:
        uid_list = []
    if uid_list == []:
        return 'task uid list null'
    # step3: get uid2uname
    uid2uname = {}
    try:
        user_portrait_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, \
                body={'ids': uid_list}, _source=False, fields=['uname'])['docs']
    except:
        user_portrait_result = []
    for item in user_portrait_result:
        uid = item['_id']
        if item['found'] == True:
            uname = item['fields']['uname'][0]
            uid2uname[uid] = uname
    # step4: iter date to search weibo
    weibo_list = []
    iter_date = ts2datetime(start_ts)
    flow_text_index_name = flow_text_index_name_pre + str(iter_date)
    # step4: get query_body
    if sentiment != '2':
        query_body = [{'terms': {'uid': uid_list}}, {'term': {'sentiment': sentiment}}, \
                {'range': {'timestamp': {'gte': start_ts, 'lt': start_ts + DAY}}}]
    else:
        query_body = [{'terms': {'uid': uid_list}}, {'terms': {'sentiment': SENTIMENT_SECOND}}, \
                {'range': {'timestamp': {'gte': start_ts, 'lt': start_ts + DAY}}}]
    try:
        flow_text_result = es_flow_text.search(index=flow_text_index_name, doc_type=flow_text_index_type, \
                body={'query': {'bool': {'must': query_body}}, 'sort': [{'timestamp': {'order': 'asc'}}], 'size': MAX_VALUE})['hits']['hits']
    except:
        flow_text_result = []
    for flow_text_item in flow_text_result:
        source = flow_text_item['_source']
        weibo = {}
        weibo['uid'] = source['uid']
        weibo['uname'] = uid2uname[weibo['uid']]
        weibo['ip'] = source['ip']
        try:
            weibo['geo'] = '\t'.join(source['geo'].split('&'))
        except:
            weibo['geo'] = ''
        weibo['text'] = source['text']
        weibo['timestamp'] = source['timestamp']
        weibo['sentiment'] = source['sentiment']
        weibo_list.append(weibo)
    return weibo_list
Example 7: get_group_list
def get_group_list(task_name, submit_user):
    results = []
    task_id = submit_user + '-' + task_name
    try:
        es_results = es_group_result.get(index=group_index_name, doc_type=group_index_type, id=task_id)['_source']
    except:
        return results
    uid_list = es_results['uid_list']
    user_portrait_attribute = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, body={'ids': uid_list})['docs']
    evaluate_max = get_evaluate_max()
    for item in user_portrait_attribute:
        uid = item['_id']
        try:
            source = item['_source']
            uname = source['uname']
            gender = source['gender']
            location = source['location']
            importance = source['importance']
            normal_importance = math.log(importance / evaluate_max['importance'] * 9 + 1, 10) * 100
            influence = source['influence']
            normal_influence = math.log(influence / evaluate_max['influence'] * 9 + 1, 10) * 100
            results.append([uid, uname, gender, location, normal_importance, normal_influence])
        except:
            results.append([uid, '', '', '', '', ''])
    return results
Example 8: search_history_delete
def search_history_delete(date):
    return_list = []
    if not date:
        now_date = time.strftime('%Y%m%d', time.localtime(time.time()))
    elif date:
        now_date = date
    else:
        pass
    fields = ['uid', 'uname', 'domain', 'topic_string', 'influence', 'importance', 'activeness']
    temp = r_out.hget("decide_delete_list", now_date)
    if temp:
        history_uid_list = json.loads(r_out.hget("decide_delete_list", now_date))
        if history_uid_list != []:
            detail = es.mget(index="user_portrait", doc_type="user", body={"ids": history_uid_list}, _source=True)['docs']
            for i in range(len(history_uid_list)):
                detail_info = []
                for item in fields:
                    if item == "topic_string":
                        detail_info.append(','.join(detail[i]['_source'][item].split("&")))
                    else:
                        detail_info.append(detail[i]['_source'][item])
                return_list.append(detail_info)
    return json.dumps(return_list)
Example 9: show_out_uid
def show_out_uid(fields):
    out_list = []
    recommend_dict = r_out.hgetall("recommend_delete_list")
    recommend_keys = recommend_dict.keys()
    for iter_key in recommend_keys:
        out_list.extend(json.loads(r_out.hget("recommend_delete_list", iter_key)))
    if not out_list:
        return out_list  # no one is recommended to out
    top_influence = get_top_influence("influence")
    top_activeness = get_top_influence("activeness")
    top_importance = get_top_influence("importance")
    out_list = list(set(out_list))
    return_list = []
    detail = es.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": out_list}, _source=True)['docs']
    # extract the return dict with the field '_source'
    filter_uid = all_delete_uid()
    for i in range(len(out_list)):
        if detail[i]['_source']['uid'] in filter_uid:
            continue
        detail_info = []
        for item in fields:
            if item == "topic":
                detail_info.append(','.join(detail[i]['_source']['topic_string'].split("&")))
            elif item == "influence":
                detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_influence) * 100))
            elif item == "importance":
                detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_importance) * 100))
            elif item == "activeness":
                detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_activeness) * 100))
            else:
                detail_info.append(detail[i]['_source'][item])
        return_list.append(detail_info)
    return return_list
Example 10: search_history_delete
def search_history_delete(date):
    return_list = []
    now_date = date
    top_influence = get_top_influence("influence")
    top_activeness = get_top_influence("activeness")
    top_importance = get_top_influence("importance")
    fields = ['uid', 'uname', 'domain', 'topic_string', 'influence', 'importance', 'activeness']
    temp = r_out.hget("decide_delete_list", now_date)
    if temp:
        history_uid_list = json.loads(r_out.hget("decide_delete_list", now_date))
        if history_uid_list != []:
            detail = es.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": history_uid_list}, _source=True)['docs']
            for i in range(len(history_uid_list)):
                detail_info = []
                for item in fields:
                    if item == "topic_string":
                        detail_info.append(','.join(detail[i]['_source'][item].split("&")))
                    elif item == "influence":
                        detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_influence) * 100))
                    elif item == "importance":
                        detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_importance) * 100))
                    elif item == "activeness":
                        detail_info.append(math.ceil(detail[i]["_source"][item] / float(top_activeness) * 100))
                    else:
                        detail_info.append(detail[i]['_source'][item])
                return_list.append(detail_info)
    return json.dumps(return_list)
Example 11: compare_user_portrait_new
def compare_user_portrait_new(uid_list):
    try:
        user_portrait_result = es.mget(index=portrait_index_name, doc_type=portrait_index_type, \
                body={'ids': uid_list})['docs']
    except:
        user_portrait_result = []
    if user_portrait_result == []:
        return 'uid_list not exist'
    # get max evaluate
    max_result = get_evaluate_max()
    user_result = {}
    # get user psycho status from flow_text
    user_psycho_status_result = get_psycho_status(uid_list)
    # iter to get user attr
    for item in user_portrait_result:
        if item['found'] != True:
            return 'uid_list not exist'
        uid = item['_id']
        user_result[uid] = {}
        source = item['_source']
        # attr: uname
        user_result[uid]['uname'] = source['uname']
        # attr: location
        user_result[uid]['location'] = source['location']
        # attr: evaluate index
        importance = source['importance']
        normal_importance = math.log(importance / max_result['importance'] * 9 + 1, 10)
        user_result[uid]['importance'] = int(normal_importance * 100)
        influence = source['influence']
        normal_influence = math.log(influence / max_result['influence'] * 9 + 1, 10)
        user_result[uid]['influence'] = int(normal_influence * 100)
        activeness = source['activeness']
        normal_activeness = math.log(activeness / max_result['activeness'] * 9 + 1, 10)
        user_result[uid]['activeness'] = int(normal_activeness * 100)
        # attr: domain
        user_result[uid]['domain'] = source['domain']
        # attr: topic
        topic_string = source['topic_string']
        user_result[uid]['topic'] = topic_string.split('&')
        # attr: activity geo dict
        activity_geo_dict_list = json.loads(source['activity_geo_dict'])
        week_activity_geo_list = activity_geo_dict_list[-7:]
        week_geo_result = {}
        for day_geo_dict in week_activity_geo_list:
            for geo_item in day_geo_dict:
                try:
                    week_geo_result[geo_item] += 1
                except:
                    week_geo_result[geo_item] = 1
        sort_week_geo_result = sorted(week_geo_result.items(), key=lambda x: x[1], reverse=True)
        user_result[uid]['activity_geo'] = [geo_item[0] for geo_item in sort_week_geo_result[:2]]
        # attr: keywords
        user_result[uid]['keywords'] = json.loads(source['keywords'])
        # attr: hashtag
        user_result[uid]['hashtag'] = json.loads(source['hashtag_dict'])
        # attr: psycho status
        user_result[uid]['psycho_status'] = user_psycho_status_result[uid]
    return user_result
Example 12: submit_identify_in_uname
def submit_identify_in_uname(input_data):
    date = input_data['date']
    submit_user = input_data['user']
    operation_type = input_data['operation_type']
    upload_data = input_data['upload_data']
    # get uname list from upload data
    uname_list_pre = upload_data.split('\n')
    uname_list = [item.split('\r')[0] for item in uname_list_pre]
    uid_list = []
    have_in_user_list = []
    invalid_user_list = []
    valid_uname_list = []
    # step1: get uid list from uname
    profile_exist_result = es_user_profile.search(index=profile_index_name, doc_type=profile_index_type, body={'query': {'terms': {'nick_name': uname_list}}}, _source=False, fields=['nick_name'])['hits']['hits']
    for profile_item in profile_exist_result:
        uid = profile_item['_id']
        uid_list.append(uid)
        uname = profile_item['fields']['nick_name'][0]
        valid_uname_list.append(uname)
    invalid_user_list = list(set(uname_list) - set(valid_uname_list))
    if len(invalid_user_list) != 0:
        return False, 'invalid user info', invalid_user_list
    # step2: filter user not in user_portrait and compute
    # step2.1: identify in user_portrait
    new_uid_list = []
    exist_portrait_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, body={'ids': uid_list})['docs']
    new_uid_list = [exist_item['_id'] for exist_item in exist_portrait_result if exist_item['found'] == False]
    have_in_user_list = [exist_item['_id'] for exist_item in exist_portrait_result if exist_item['found'] == True]
    if not new_uid_list:
        return False, 'all user in'
    # step2.2: identify in compute
    new_uid_set = set(new_uid_list)
    compute_set = set(r.hkeys('compute'))
    in_uid_list = list(new_uid_set - compute_set)
    if not in_uid_list:
        return False, 'all user in'
    # step3: save submit
    hashname_submit = 'submit_recomment_' + date
    hashname_influence = 'recomment_' + date + '_influence'
    hashname_sensitive = 'recomment_' + date + '_sensitive'
    submit_user_recomment = 'recomment_' + submit_user + '_' + str(date)
    auto_recomment_set = set(r.hkeys(hashname_influence)) | set(r.hkeys(hashname_sensitive))
    # identify final submit user list
    final_submit_user_list = []
    for in_item in in_uid_list:
        if in_item in auto_recomment_set:
            tmp = json.loads(r.hget(hashname_submit, in_item))
            recommentor_list = tmp['operation'].split('&')
            recommentor_list.append(str(submit_user))
            new_list = list(set(recommentor_list))
            tmp['operation'] = '&'.join(new_list)
        else:
            tmp = {'system': '0', 'operation': submit_user}
        if operation_type == 'submit':
            r.hset(hashname_submit, in_item, json.dumps(tmp))
            r.hset(submit_user_recomment, in_item, '0')
            final_submit_user_list.append(in_item)
    return True, invalid_user_list, have_in_user_list, final_submit_user_list
Example 13: compare_user_portrait
def compare_user_portrait(uid_list):
    user_portrait_result = {}
    index_name = 'user_portrait'
    index_type = 'user'
    user_result = es.mget(index=index_name, doc_type=index_type, body={'ids': uid_list})['docs']
    #user_portrait_result = [item['_source'] for item in user_result]
    #print 'user_result:', user_portrait_result
    for item in user_result:
        uid = item['_id']
        user_portrait_result[uid] = {}
        try:
            source = item['_source']
        except:
            continue  # skip ids that were not found in user_portrait
        try:
            psycho_status = json.loads(source['psycho_status'])
        except:
            psycho_status = {}
        try:
            psycho_feature = json.loads(source['psycho_feature'])
        except:
            psycho_feature = {}
        try:
            activity_geo_dict = json.loads(source['activity_geo_dict'])
            sort_activity_geo = sorted(activity_geo_dict.items(), key=lambda x: x[1], reverse=True)
            activity_geo_list = sort_activity_geo[:2]
            activity_list = []
            for geo_item in activity_geo_list:
                city_list = geo_item[0].split('\t')
                city = city_list[len(city_list) - 1]
                activity_list.append(city)
        except:
            activity_list = []
        try:
            hashtag_dict = json.loads(source['hashtag_dict'])
        except:
            hashtag_dict = {}
        user_portrait_result[uid] = {
            'uname': source['uname'],
            'gender': source['gender'],
            'location': source['location'],
            'importance': source['importance'],
            'activeness': source['activeness'],
            'influence': source['influence'],
            'fansnum': source['fansnum'],
            'statusnum': source['statusnum'],
            'friendsnum': source['friendsnum'],
            'domain': source['domain'],
            'topic': json.loads(source['topic']),
            'keywords': json.loads(source['keywords']),
            'psycho_status': psycho_status,
            'psycho_feature': psycho_feature,
            'activity_geo': activity_list,
            'hashtag_dict': hashtag_dict
        }
    #print 'user_portrait_result:', user_portrait_result
    return user_portrait_result
Example 14: search_portrait_user_in_activity
def search_portrait_user_in_activity(es, number, active_index, active_type, portrait_index, portrait_type, field="user_index"):
    return_list = []
    index_exist = es.indices.exists(index=active_index)
    if not index_exist:
        return "no active_index exist"
        sys.exit(0)
    count_s = 0
    count_c = 0
    start = 0
    rank = 1
    while 1:
        search_list = []
        user_list = search_k(es, active_index, active_type, start, field, 100)
        start += 100
        for item in user_list:
            if field == "vary":
                uid = item.get('uid', '0')  # obtain uid, notice "uid" or "user"
            else:
                uid = item.get('user', '0')
            search_list.append(uid)  # uid list
        search_result = es_portrait.mget(index=portrait_index, doc_type=portrait_type, body={"ids": search_list}, _source=True)["docs"]
        profile_result = es_profile.mget(index="weibo_user", doc_type="user", body={"ids": search_list}, _source=True)["docs"]
        key_list = ["origin_weibo_retweeted_total_number", "origin_weibo_retweeted_average_number", "origin_weibo_retweeted_top_number", "origin_weibo_retweeted_brust_average", \
                "origin_weibo_comment_total_number", "origin_weibo_comment_average_number", "origin_weibo_comment_top_number", "origin_weibo_retweeted_brust_average", \
                "retweeted_weibo_retweeted_total_number", "retweeted_weibo_retweeted_average_number", "retweeted_weibo_retweeted_top_number", "retweeted_weibo_retweeted_brust_average", \
                "retweeted_weibo_comment_total_number", "retweeted_weibo_comment_average_number", "retweeted_weibo_comment_top_number", "retweeted_weibo_retweeted_brust_average"]
        for item in search_result:
            if item["found"]:
                info = ['', '', '', '', '', '']
                info[0] = rank
                index = search_result.index(item)
                if profile_result[index]['found']:
                    info[1] = profile_result[index]['_source'].get('photo_url', '')
                    info[3] = profile_result[index]['_source'].get('nick_name', '')
                info[2] = search_result[index].get('_id', '')
                info[4] = user_list[index]['user_index']
                info[5] = "1"
                if field == 'origin_weibo_retweeted_brust_average':
                    info.append(user_list[index]['origin_weibo_retweeted_brust_average'])
                    for key in key_list:
                        info.append(user_list[index][key])
                elif field == 'origin_weibo_comment_brust_average':
                    info.append(user_list[index]['origin_weibo_comment_brust_average'])
                    for key in key_list:
                        info.append(user_list[index][key])
                else:
                    pass
                return_list.append(info)
                rank += 1
                count_c += 1
                if count_c >= int(number):
                    return return_list
Example 15: show_keywords_rank
def show_keywords_rank(task_id, sort_type, count):
    try:
        task_found = es_network_task.get(index=network_keywords_index_name, \
                doc_type=network_keywords_index_type, id=task_id)['_source']
    except:
        task_found = {}
        return task_found
    search_results = json.loads(task_found['results'])
    sort_results = search_results[sort_type]
    results = []
    uid_list = []
    sort_list = []
    for source_uid, sort_value in sort_results:
        uid_list.append(source_uid)
        sort_list.append(sort_value)
    # look up background information
    if uid_list:
        profile_result = es_user_profile.mget(index=profile_index_name, doc_type=profile_index_type, body={"ids": uid_list})["docs"]
        for item in profile_result:
            _id = item['_id']
            index = profile_result.index(item)
            tmp = []
            if item['found']:
                item = item['_source']
                tmp.append(item['uid'])
                tmp.append(item['nick_name'])
                tmp.append(item['user_location'])
            else:
                tmp.extend([_id, '', ''])
            value = sort_list[index]
            tmp.append(value)
            results.append(tmp)
    if uid_list:
        count = 0
        history_result = es_bci_history.mget(index=bci_history_index_name, doc_type=bci_history_index_type, body={"ids": uid_list})["docs"]
        for item in history_result:
            if item['found']:
                item = item['_source']
                results[count].extend([item['user_fansnum'], item['weibo_month_sum']])
            else:
                results[count].extend(['', ''])
            count += 1
    if uid_list:
        count = 0
        portrait_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": uid_list})["docs"]
        for item in portrait_result:
            if item['found']:
                results[count].append("1")
            else:
                results[count].append("0")
            count += 1
    return results