This article collects typical usage examples of the ts2datetime function from the Python module sensitive_user_portrait.time_utils. If you are wondering what exactly ts2datetime does, how to call it, or what real usage looks like, the curated code examples below may help.
A total of 15 code examples of the ts2datetime function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
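The time_utils module itself is not reproduced on this page, so the examples below rely on the usual semantics of its helpers: ts2datetime converts a Unix timestamp to a 'YYYY-MM-DD' date string, datetime2ts converts such a string back to the timestamp of that day's midnight, and ts2date formats a timestamp as 'YYYY-MM-DD HH:MM:SS'. The following is a minimal sketch of those assumed signatures for reference only, not the project's actual implementation.

# Minimal sketch of the assumed time helpers (not the actual
# sensitive_user_portrait.time_utils source).
import time
from datetime import datetime

def ts2datetime(ts):
    # Unix timestamp -> local 'YYYY-MM-DD' date string
    return time.strftime('%Y-%m-%d', time.localtime(float(ts)))

def datetime2ts(date_str):
    # 'YYYY-MM-DD' date string -> Unix timestamp at 00:00:00 of that day
    return int(time.mktime(datetime.strptime(date_str, '%Y-%m-%d').timetuple()))

def ts2date(ts):
    # Unix timestamp -> local 'YYYY-MM-DD HH:MM:SS' string
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(ts)))

# datetime2ts(ts2datetime(ts)) truncates a timestamp to local midnight, which is
# how the examples below build day-aligned Redis keys and Elasticsearch index names.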
Example 1: get_user_sensitive_words
def get_user_sensitive_words(uid):
    user_sensitive_words_dict = {}
    if RUN_TYPE:
        now_ts = time.time()
        now_date = ts2datetime(now_ts)  # 2015-09-22
    else:
        now_date = "2013-09-08"
    ts = datetime2ts(now_date)
    #test
    #ts = datetime2ts('2013-09-08')
    for i in range(1, 8):
        ts = ts - 3600*24
        date = ts2datetime(ts).replace('-', '')
        results = r_cluster.hget('sensitive_'+str(ts), uid)
        if results:
            sensitive_words_dict = json.loads(results)
            for word in sensitive_words_dict:
                if user_sensitive_words_dict.has_key(word):
                    user_sensitive_words_dict[word] += sensitive_words_dict[word]
                else:
                    user_sensitive_words_dict[word] = sensitive_words_dict[word]
    sort_sensitive_words_dict = sorted(user_sensitive_words_dict.items(), key=lambda x:x[1], reverse=True)
    return sort_sensitive_words_dict
Example 2: search_mention
def search_mention(uid, sensitive):
    date = ts2datetime(time.time()).replace('-', '')
    stat_results = dict()
    results = dict()
    test_ts = time.time()
    test_ts = datetime2ts('2013-09-07')  # test
    for i in range(0, 7):
        ts = test_ts - i*24*3600
        date = ts2datetime(ts).replace('-', '')
        if not sensitive:
            at_temp = r_cluster.hget('at_' + str(date), str(uid))
        else:
            at_temp = r_cluster.hget('sensitive_at_' + str(date), str(uid))
        if not at_temp:
            continue
        else:
            result_dict = json.loads(at_temp)
            # aggregate mention counts per mentioned uid
            for at_uid in result_dict:
                if stat_results.has_key(at_uid):
                    stat_results[at_uid] += result_dict[at_uid]
                else:
                    stat_results[at_uid] = result_dict[at_uid]
    if not stat_results:
        return [None, 0]
    in_status = identify_uid_list_in(result_dict.keys())
    for at_uid in result_dict:
        if at_uid in in_status:
            results[at_uid] = [result_dict[at_uid], '1']
        else:
            results[at_uid] = [result_dict[at_uid], '0']
    sorted_results = sorted(results.items(), key=lambda x:x[1][0], reverse=True)
    return [sorted_results[0:20], len(results)]
Example 3: get_group_user_track
def get_group_user_track(uid):
    results = []
    # step1: get user_portrait activity_geo_dict
    try:
        portrait_result = es_user_portrait.get(
            index=portrait_index_name, doc_type=portrait_index_type, id=uid, _source=False, fields=["activity_geo_dict"]
        )
    except:
        portrait_result = {}
    if portrait_result == {}:
        return "uid is not in user_portrait"
    activity_geo_dict = json.loads(portrait_result["fields"]["activity_geo_dict"][0])
    now_date_ts = datetime2ts(ts2datetime(int(time.time())))
    start_ts = now_date_ts - DAY * len(activity_geo_dict)
    # step2: iter date to get month track
    for geo_item in activity_geo_dict:
        iter_date = ts2datetime(start_ts)
        sort_day_dict = sorted(geo_item.items(), key=lambda x: x[1], reverse=True)
        if sort_day_dict:
            results.append([iter_date, sort_day_dict[0][0]])
        else:
            results.append([iter_date, ""])
        start_ts = start_ts + DAY
    return results
Example 4: count_hot_uid
def count_hot_uid(uid, start_time, stop_time):
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"range":{
                                "timestamp":{
                                    "gte": start_time,
                                    "lt": stop_time
                                }
                            }},
                            {"term": {"root_uid": uid}}
                        ]
                    }
                }
                # "query":{
                #     "bool":{
                #         "should":[
                #         ]
                #     }
                # }
            }
        }
    }
    count = 0
    datetime = ts2datetime(float(stop_time))
    index_name = flow_text_index_name_pre + datetime
    exist_es = es_text.indices.exists(index_name)
    if exist_es:
        count = es_text.count(index=index_name, doc_type=flow_text_index_type, body=query_body)["count"]
    else:
        count = 0
    datetime_1 = ts2datetime(float(start_time))
    if datetime_1 == datetime:
        pass
    else:
        # walk backwards one day at a time, accumulating the per-day counts
        ts = float(stop_time)
        while 1:
            ts = ts - day_time
            datetime = ts2datetime(ts)
            index_name = flow_text_index_name_pre + datetime
            exist_es = es_text.indices.exists(index_name)
            if exist_es:
                count += es_text.count(index=index_name, doc_type=flow_text_index_type, body=query_body)["count"]
            else:
                count += 0
            if datetime_1 == datetime:
                break
    return count
Example 5: query_hot_mid
def query_hot_mid(ts, keywords_list, text_type, size=100):
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"range":{
                                "timestamp":{
                                    "gte": ts - time_interval,
                                    "lt": ts
                                }
                            }},
                            {"terms": {"keywords_string": keywords_list}},
                            {"term": {"message_type": "0"}}
                        ]
                    }
                }
            }
        },
        "aggs":{
            "all_interests":{
                "terms":{"field": "root_mid", "size": size}
            }
        }
    }
    datetime = ts2datetime(ts)
    datetime_1 = ts2datetime(ts - time_interval)
    index_name = flow_text_index_name_pre + datetime
    exist_es = es_text.indices.exists(index_name)
    index_name_1 = flow_text_index_name_pre + datetime_1
    exist_bool_1 = es_text.indices.exists(index_name_1)
    print datetime, datetime_1
    if datetime == datetime_1 and exist_es:
        search_results = es_text.search(index=index_name, doc_type=flow_text_index_type, body=query_body)["aggregations"]["all_interests"]["buckets"]
    elif datetime != datetime_1 and exist_bool_1:
        search_results = es_text.search(index=index_name_1, doc_type=flow_text_index_type, body=query_body)["aggregations"]["all_interests"]["buckets"]
    else:
        search_results = []
    hot_mid_list = []
    if search_results:
        for item in search_results:
            print item
            temp = []
            temp.append(item['key'])
            temp.append(item['doc_count'])
            hot_mid_list.append(temp)
    #print hot_mid_list
    return hot_mid_list
Example 6: get_user_geo
def get_user_geo(uid):
    results = []
    user_geo_result = {}
    user_ip_dict = {}
    user_ip_result = {}  # ordinary ip
    user_sensitive_ip_result = {}  # sensitive ip
    now_ts = time.time()
    now_date = ts2datetime(now_ts)  # 2015-09-22
    ts = datetime2ts(now_date)
    #test
    ts = datetime2ts('2013-09-08')
    for i in range(1, 8):
        ts = ts - 3600*24
        date = ts2datetime(ts).replace('-', '')
        results = r_cluster.hget('ip_'+str(date), uid)
        sensitive_results = r_cluster.hget('sensitive_ip'+str(date), uid)
        if results:
            ip_results = json.loads(results)
            for ip in ip_results:
                if user_ip_result.has_key(ip):
                    user_ip_result[ip] += ip_results[ip]
                else:
                    user_ip_result[ip] = ip_results[ip]
        if sensitive_results:
            sensitive_ip_results = json.loads(sensitive_results)
            for ip in sensitive_ip_results:
                if user_sensitive_ip_result.has_key(ip):
                    user_sensitive_ip_result[ip] += sensitive_ip_results[ip]
                else:
                    user_sensitive_ip_result[ip] = sensitive_ip_results[ip]
    ordinary_key_set = set(user_ip_result.keys())
    sensitive_key_set = set(user_sensitive_ip_result.keys())
    for key in sensitive_key_set:
        if key in ordinary_key_set:
            user_ip_result[key] += user_sensitive_ip_result[key]
        else:
            user_ip_result[key] = user_sensitive_ip_result[key]
    user_geo_dict = ip2geo(user_ip_result)
    sorted_user_geo_dict = sorted(user_geo_dict.items(), key=lambda x:x[1], reverse=True)
    sensitive_user_geo_dict = ip2geo(user_sensitive_ip_result)
    sorted_sensitive_user_geo_dict = sorted(sensitive_user_geo_dict.items(), key=lambda x:x[1], reverse=True)
    return_list = []
    return_list = [sorted_user_geo_dict, sorted_sensitive_user_geo_dict]  # total and sensitive
    return return_list
Example 7: get_influence_content
def get_influence_content(uid, timestamp_from, timestamp_to):
    weibo_list = []
    # split timestamp range to new_range_dict_list
    from_date_ts = datetime2ts(ts2datetime(timestamp_from))
    to_date_ts = datetime2ts(ts2datetime(timestamp_to))
    new_range_dict_list = []
    if from_date_ts != to_date_ts:
        iter_date_ts = from_date_ts
        while iter_date_ts < to_date_ts:
            iter_next_date_ts = iter_date_ts + DAY
            new_range_dict_list.append({"range": {"timestamp": {"gte": iter_date_ts, "lt": iter_next_date_ts}}})
            iter_date_ts = iter_next_date_ts
        if new_range_dict_list[0]["range"]["timestamp"]["gte"] < timestamp_from:
            new_range_dict_list[0]["range"]["timestamp"]["gte"] = timestamp_from
        if new_range_dict_list[-1]["range"]["timestamp"]["lt"] > timestamp_to:
            new_range_dict_list[-1]["range"]["timestamp"]["lt"] = timestamp_to
    else:
        new_range_dict_list = [{"range": {"timestamp": {"gte": timestamp_from, "lt": timestamp_to}}}]
    # iter date to search flow_text
    iter_result = []
    for range_item in new_range_dict_list:
        range_from_ts = range_item["range"]["timestamp"]["gte"]
        range_from_date = ts2datetime(range_from_ts)
        flow_text_index_name = flow_text_index_name_pre + range_from_date
        query = []
        query.append({"term": {"uid": uid}})
        query.append(range_item)
        try:
            flow_text_exist = es_flow_text.search(
                index=flow_text_index_name,
                doc_type=flow_text_index_type,
                body={"query": {"bool": {"must": query}}, "sort": [{"timestamp": "asc"}]},
            )["hits"]["hits"]
        except:
            flow_text_exist = []
        iter_result.extend(flow_text_exist)
        # get weibo list
        for item in flow_text_exist:
            source = item["_source"]
            weibo = {}
            weibo["timestamp"] = ts2date(source["timestamp"])
            weibo["ip"] = source["ip"]
            weibo["text"] = source["text"]
            if source["geo"]:
                weibo["geo"] = "\t".join(source["geo"].split("&"))
            else:
                weibo["geo"] = ""
            weibo_list.append(weibo)
    return weibo_list
Example 8: get_network
def get_network(task_exist):
    task_name = task_exist['task_name']
    submit_date = task_exist['submit_date']
    submit_ts = date2ts(submit_date)
    time_segment = 24*3600
    now_ts = time.time()
    now_date = ts2datetime(now_ts)
    now_date_ts = datetime2ts(now_date)
    #test
    now_date_ts = datetime2ts('2013-09-07')
    iter_date_ts = now_date_ts
    iter_count = 1
    date_list = []
    top_list_dict = {}
    while True:
        if iter_count >= 8 or iter_date_ts < submit_ts:
            break
        iter_date = ts2datetime(iter_date_ts)
        date_list.append(iter_date)
        key = 'inner_' + str(iter_date)
        try:
            task_date_result = es.get(index=monitor_index_name, doc_type=task_name, id=key)['_source']
        except:
            task_date_result = {}
        #print 'task_name, key, task_date_result:', task_name, key, task_date_result
        iter_field = ['top1', 'top2', 'top3', 'top4', 'top5']
        for field in iter_field:
            user_count_item = json.loads(task_date_result[field])
            uid = user_count_item[0]
            uname = uid2uname(uid)
            count = user_count_item[1]
            try:
                top_list_dict[field].append([uid, uname, count])
            except:
                top_list_dict[field] = [[uid, uname, count]]
        iter_date_ts -= time_segment
        iter_count += 1  # advance the counter so the loop covers at most 7 days
    # get inner-retweet group from es---field: inner_graph
    '''
    try:
        inner_graph = json.loads(task_date_result['inner_graph'])
    except:
        inner_graph = {}
    '''
    abnormal_index = compute_inner_polarization(top_list_dict)
    return [date_list, top_list_dict, abnormal_index]
Example 9: end_track_task
def end_track_task(task_name):
    status = 0
    try:
        task_exist = es.get(index=index_name, doc_type=index_type, id=task_name)['_source']
    except:
        return 'task name not exist'
    task_status = task_exist['status']
    if task_status == '0':
        return 'task have end'
    else:
        task_exist['status'] = 0
        # made end time
        now_ts = time.time()
        now_date = ts2datetime(now_ts)
        now_date_ts = datetime2ts(now_date)
        time_segment = int((now_ts - now_date_ts) / 900) + 1
        end_ts = now_date_ts + time_segment * 900
        end_date = ts2date(end_ts)
        task_exist['end_date'] = end_date
        task_user = task_exist['uid_list']
        status = change_user_count(task_user)
        if status == 0:
            return 'change user task count fail'
        else:
            es.index(index=index_name, doc_type=index_type, id=task_name, body=task_exist)
            status = delete_task_redis(task_name)
            if status == 0:
                return 'delete task from redis fail'
            else:
                return 'success change status to end'
Example 10: ajax_upload_track_file
def ajax_upload_track_file():
    results = {}
    upload_data = request.form['upload_data']
    task_name = request.form['task_name']
    state = request.form['state']
    now_ts = time.time()
    now_date = ts2datetime(now_ts)
    now_date_ts = datetime2ts(now_date)
    time_segment = int((now_ts - now_date_ts) / 900) + 1
    trans_ts = now_date_ts + time_segment * 900
    line_list = upload_data.split('\n')
    input_data = {}
    # submit task: the start time is rounded up to the next 15-minute boundary
    input_data['submit_date'] = trans_ts
    input_data['task_name'] = task_name
    uid_list = []
    for line in line_list:
        uid = line[:10]
        if len(uid) == 10:
            uid_list.append(uid)
    input_data['uid_list'] = uid_list
    input_data['status'] = 1  # status shows whether the track task is running or ended; running 1, ended 0
    input_data['count'] = len(uid_list)
    status = submit_track_task(input_data)
    return json.dumps(status)
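Example 10 aligns the submit time to the next 15-minute (900 s) slot after the current moment. As a quick standalone illustration of that arithmetic, with the day start approximated by a plain modulo (an assumption for this sketch; the example itself uses datetime2ts(ts2datetime(now_ts)) to get local midnight):

# Illustration of the 15-minute alignment used above (day boundary taken via
# modulo for simplicity; not taken from the project source).
import time

now_ts = time.time()
now_date_ts = now_ts - (now_ts % 86400)               # start of the current day (UTC-based here)
time_segment = int((now_ts - now_date_ts) / 900) + 1  # index of the next 15-minute slot
trans_ts = now_date_ts + time_segment * 900           # timestamp aligned to that slot
assert 0 < trans_ts - now_ts <= 900                   # always within the next 15 minutes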
Example 11: influence_distribute
def influence_distribute():
    row = [0, 200, 500, 700, 900, 1100, 10000]
    result = []
    ts = time.time()
    ts = datetime2ts('2013-09-08')  # test
    ts = ts - 8*3600*24
    for j in range(7):
        detail = []
        ts += 3600*24
        date = ts2datetime(ts).replace('-', '')
        for i in range(6):
            low_limit = row[i]
            upper_limit = row[i+1]
            query_body = {
                "query": {
                    "filtered": {
                        "filter": {
                            "range": {
                                date: {
                                    "gte": low_limit,
                                    "lt": upper_limit
                                }
                            }
                        }
                    }
                }
            }
            number = es.count(index='copy_sensitive_user_portrait', doc_type="user", body=query_body)['count']
            detail.append(number)
        result.append(detail)
    return [row, result]
Example 12: sort_sensitive_text
def sort_sensitive_text(uid):
    sensitive_text = search_sensitive_text(uid)
    text_all = []
    if sensitive_text:
        for item in sensitive_text:
            text_detail = []
            item = item["_source"]
            if not item["sensitive"]:
                continue
            text = item["text"].encode("utf-8", "ignore")
            sentiment_dict = json.loads(item["sentiment"])
            if not sentiment_dict:
                sentiment = 0
            else:
                positive = len(sentiment_dict.get("126", {}))
                negetive = (
                    len(sentiment_dict.get("127", {}))
                    + len(sentiment_dict.get("128", {}))
                    + len(sentiment_dict.get("129", {}))
                )
                if positive > negetive:
                    sentiment = 1
                elif positive < negetive:
                    sentiment = -1
                else:
                    sentiment = 0
            ts = item["timestamp"]
            uid = item["uid"]
            mid = item["mid"]
            message_type = item.get("message_type", 0)
            date = ts2datetime(float(ts)).replace("-", "")
            try:
                bci_result = es.get(index=date, doc_type="bci", id=uid)["_source"]
                if int(message_type) == 1:
                    retweeted_number = bci_result["s_origin_weibo_retweeted_detail"].get(mid)
                    comment_number = bci_result["s_origin_weibo_comment_detail"].get(mid)
                elif int(message_type) == 2:
                    retweeted_number = bci_result["s_retweeted_weibo_retweeted_detail"].get(mid)
                    comment_number = bci_result["s_retweeted_weibo_comment_detail"].get(mid)
                else:
                    retweeted_number = 0
                    comment_number = 0
            except:
                retweeted_number = 0
                comment_number = 0
            single_sw = item.get("sensitive_words", {})
            if single_sw:
                sw = json.loads(single_sw).keys()
            else:
                # print item
                sw = []
            geo = item["geo"]
            retweeted_link = extract_uname(text)
            text_detail.extend(
                [ts, geo, text, sw, retweeted_link, sentiment, message_type, retweeted_number, comment_number]
            )
            text_all.append(text_detail)
    return text_all
Example 13: get_text_index
def get_text_index(date):
    now_ts = datetime2ts(date)
    index_list = []
    for i in range(7):
        ts = now_ts - i*DAY
        tmp_index = pre_text_index + ts2datetime(ts)
        index_list.append(tmp_index)
    return index_list
Example 14: ajax_show_sensitive_history_in
def ajax_show_sensitive_history_in():
    results = []
    now_date = ts2datetime(time.time())
    date = request.args.get('date', now_date)  # in date: 2013-09-01
    if str(date) == "all":
        ts = time.time()
        now_ts = datetime2ts(now_date)
        for i in range(7):
            ts = now_ts - i*24*3600
            date = ts2datetime(ts)
            temp = show_in_history(date, 1)
            results.extend(temp)
    else:
        results = show_in_history(date, 1)  # history in, include status
    if results:
        return json.dumps(results)
    else:
        return json.dumps([])
Example 15: ajax_show_influence_history_in
def ajax_show_influence_history_in():
    results = []
    now_date = ts2datetime(time.time())
    date = request.args.get('date', now_date)
    if str(date) == "all":
        ts = time.time()
        now_ts = datetime2ts('2013-09-07')
        for i in range(7):
            ts = now_ts - i*24*3600
            date = ts2datetime(ts)
            date = str(date).replace('-', '')
            temp = show_in_history(date, 1)
            results.extend(temp)
    else:
        date = str(date).replace('-', '')
        results = show_in_history(date, 0)  # history in, include status
    if results:
        return json.dumps(results)
    else:
        return json.dumps([])